Merge branch 'master' into 14988-wb-rails5-upgrade
authorLucas Di Pentima <ldipentima@veritasgenetics.com>
Wed, 8 May 2019 19:25:57 +0000 (16:25 -0300)
committerLucas Di Pentima <ldipentima@veritasgenetics.com>
Wed, 8 May 2019 19:25:57 +0000 (16:25 -0300)
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <ldipentima@veritasgenetics.com>

235 files changed:
.gitignore
.licenseignore
build/run-build-docker-jobs-image.sh
build/run-library.sh
build/run-tests.sh
doc/Rakefile
doc/_config.yml
doc/_includes/_config_default_yml.liquid [new symlink]
doc/admin/config-migration.html.textile.liquid [new file with mode: 0644]
doc/admin/config.html.textile.liquid [new file with mode: 0644]
doc/admin/controlling-container-reuse.html.textile.liquid [new file with mode: 0644]
doc/admin/logs-table-management.html.textile.liquid [new file with mode: 0644]
doc/admin/upgrading.html.textile.liquid
doc/api/methods/containers.html.textile.liquid
doc/install/arvbox.html.textile.liquid
doc/install/install-keep-balance.html.textile.liquid
doc/install/install-nodemanager.html.textile.liquid
doc/sdk/index.html.textile.liquid
doc/sdk/java-v2/example.html.textile.liquid [new file with mode: 0644]
doc/sdk/java-v2/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/java-v2/javadoc.html.textile.liquid [new file with mode: 0644]
doc/sdk/java/index.html.textile.liquid
lib/config/config.default.yml [new file with mode: 0644]
lib/dispatchcloud/container/queue_test.go
sdk/cwl/arvados_cwl/executor.py
sdk/cwl/arvados_cwl/pathmapper.py
sdk/cwl/tests/test_make_output.py
sdk/java-v2/.gitignore [new file with mode: 0644]
sdk/java-v2/.licenseignore [new file with mode: 0644]
sdk/java-v2/COPYING [new file with mode: 0644]
sdk/java-v2/README.md [new file with mode: 0644]
sdk/java-v2/agpl-3.0.txt [new file with mode: 0644]
sdk/java-v2/apache-2.0.txt [new file with mode: 0644]
sdk/java-v2/build.gradle [new file with mode: 0644]
sdk/java-v2/gradle.properties [new file with mode: 0644]
sdk/java-v2/settings.gradle [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingFileRequestBody.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServicesApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/ProgressListener.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/UsersApiClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/client/factory/OkHttpClientFactory.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/ApiError.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/Collection.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionList.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/GroupList.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepService.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepServiceList.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/RuntimeConstraints.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/UserList.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Argument.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ContentsGroup.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Filter.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/UntrashGroup.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/common/Characters.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/common/Headers.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/common/Patterns.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosApiException.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosClientException.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/CollectionFactory.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/FileToken.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestDecoder.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestFactory.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestStream.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileTransferHandler.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileUploader.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepLocator.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/FileAlreadyExistsException.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/utils/FileMerge.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/utils/FileSplit.java [new file with mode: 0644]
sdk/java-v2/src/main/resources/reference.conf [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/api/client/BaseStandardApiClientTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/api/client/GroupsApiClientTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServerApiClientTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServicesApiClientTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/api/client/UsersApiClientTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/api/client/factory/OkHttpClientFactoryTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/junit/categories/IntegrationTests.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/FileTokenTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestDecoderTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestFactoryTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestStreamTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepClientTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepLocatorTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientIntegrationTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientMockedWebServerTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientUnitTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/FileTestUtils.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/test/utils/RequestMethod.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/utils/FileMergeTest.java [new file with mode: 0644]
sdk/java-v2/src/test/java/org/arvados/client/utils/FileSplitTest.java [new file with mode: 0644]
sdk/java-v2/src/test/resources/application.conf [new file with mode: 0644]
sdk/java-v2/src/test/resources/integration-tests-application.conf [new file with mode: 0644]
sdk/java-v2/src/test/resources/integration-tests-application.conf.example [new file with mode: 0644]
sdk/java-v2/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json [new file with mode: 0644]
sdk/java-v2/src/test/resources/selfsigned.keystore.jks [new file with mode: 0644]
sdk/java-v2/test-in-docker.sh [new file with mode: 0755]
services/api/app/controllers/application_controller.rb
services/api/app/controllers/arvados/v1/groups_controller.rb
services/api/app/controllers/arvados/v1/healthcheck_controller.rb
services/api/app/controllers/arvados/v1/schema_controller.rb
services/api/app/controllers/static_controller.rb
services/api/app/controllers/user_sessions_controller.rb
services/api/app/mailers/admin_notifier.rb
services/api/app/mailers/profile_notifier.rb
services/api/app/mailers/user_notifier.rb
services/api/app/models/api_client_authorization.rb
services/api/app/models/arvados_model.rb
services/api/app/models/blob.rb
services/api/app/models/collection.rb
services/api/app/models/commit.rb
services/api/app/models/commit_ancestor.rb
services/api/app/models/container.rb
services/api/app/models/container_request.rb
services/api/app/models/job.rb
services/api/app/models/node.rb
services/api/app/models/repository.rb
services/api/app/models/user.rb
services/api/app/views/admin_notifier/new_inactive_user.text.erb
services/api/app/views/admin_notifier/new_user.text.erb
services/api/app/views/user_notifier/account_is_setup.text.erb
services/api/config/application.default.yml
services/api/config/application.rb
services/api/config/arvados_config.rb [new file with mode: 0644]
services/api/config/config.default.yml [new symlink]
services/api/config/initializers/legacy_jobs_api.rb
services/api/config/initializers/load_config.rb [deleted file]
services/api/config/initializers/lograge.rb
services/api/config/initializers/omniauth_init.rb
services/api/config/initializers/preload_all_models.rb
services/api/config/secrets.yml [deleted file]
services/api/db/migrate/20190422144631_fill_missing_modified_at.rb [new file with mode: 0644]
services/api/db/structure.sql
services/api/lib/audit_logs.rb
services/api/lib/config_loader.rb [new file with mode: 0644]
services/api/lib/crunch_dispatch.rb
services/api/lib/current_api_client.rb
services/api/lib/enable_jobs_api.rb
services/api/lib/has_uuid.rb
services/api/lib/josh_id.rb
services/api/lib/load_param.rb
services/api/lib/log_reuse_info.rb
services/api/lib/refresh_permission_view.rb
services/api/lib/sweep_trashed_objects.rb
services/api/lib/tasks/config.rake [new file with mode: 0644]
services/api/lib/tasks/config_check.rake [deleted file]
services/api/lib/tasks/config_dump.rake [deleted file]
services/api/lib/tasks/delete_old_container_logs.rake
services/api/lib/tasks/delete_old_job_logs.rake
services/api/lib/trashable.rb
services/api/test/functional/arvados/v1/collections_controller_test.rb
services/api/test/functional/arvados/v1/filters_test.rb
services/api/test/functional/arvados/v1/groups_controller_test.rb
services/api/test/functional/arvados/v1/jobs_controller_test.rb
services/api/test/functional/arvados/v1/nodes_controller_test.rb
services/api/test/functional/arvados/v1/repositories_controller_test.rb
services/api/test/functional/arvados/v1/schema_controller_test.rb
services/api/test/functional/arvados/v1/users_controller_test.rb
services/api/test/helpers/git_test_helper.rb
services/api/test/integration/collections_api_test.rb
services/api/test/integration/groups_test.rb
services/api/test/integration/remote_user_test.rb
services/api/test/integration/user_sessions_test.rb
services/api/test/tasks/delete_old_container_logs_test.rb
services/api/test/tasks/delete_old_job_logs_test.rb
services/api/test/test_helper.rb
services/api/test/unit/blob_test.rb
services/api/test/unit/collection_test.rb
services/api/test/unit/commit_test.rb
services/api/test/unit/container_request_test.rb
services/api/test/unit/container_test.rb
services/api/test/unit/crunch_dispatch_test.rb
services/api/test/unit/fail_jobs_test.rb
services/api/test/unit/job_test.rb
services/api/test/unit/log_test.rb
services/api/test/unit/node_test.rb
services/api/test/unit/repository_test.rb
services/api/test/unit/user_notifier_test.rb
services/api/test/unit/user_test.rb
services/keep-balance/balance.go
services/keep-balance/balance_run_test.go
services/keep-balance/balance_test.go
services/keep-balance/block_state.go
services/keep-balance/collection.go
services/keep-balance/integration_test.go
services/keep-balance/main.go
services/keep-balance/server.go
services/keepstore/azure_blob_volume.go
services/keepstore/azure_blob_volume_test.go
services/keepstore/handlers.go
tools/arvbox/bin/arvbox
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/createusers.sh
tools/arvbox/lib/arvbox/docker/devenv.sh
tools/crunchstat-summary/crunchstat_summary/summarizer.py
tools/keep-xref/keep-xref.py [new file with mode: 0755]

index db3020ae82d13231817872355b04dd09849f87a0..877ccdf4dfd1da971a3f736d18af06e381869e5d 100644 (file)
@@ -13,6 +13,7 @@ docker/config.yml
 doc/.site
 doc/sdk/python/arvados
 doc/sdk/R/arvados
+doc/sdk/java-v2/javadoc
 sdk/perl/MYMETA.*
 sdk/perl/Makefile
 sdk/perl/blib
index 45028bf888ff6a40f910f29197aaac1a8d29516f..a9b6f5f6cafdbedb1a202f5f9c0e3526e7a54549 100644 (file)
@@ -13,6 +13,7 @@ build/package-test-dockerfiles/ubuntu1604/etc-apt-preferences.d-arvados
 *by-sa-3.0.txt
 *COPYING
 doc/fonts/*
+doc/_includes/_config_default_yml.liquid
 doc/user/cwl/federated/*
 */docker_image
 docker/jobs/apt.arvados.org*.list
@@ -59,6 +60,7 @@ sdk/pam/examples/shellinabox
 sdk/pam/pam-configs/arvados
 sdk/python/tests/data/*
 services/api/config/unbound.template
+services/api/config/config.default.yml
 services/arv-web/sample-cgi-app/public/.htaccess
 services/arv-web/sample-cgi-app/public/index.cgi
 services/keepproxy/pkg-extras/etc/default/keepproxy
@@ -74,3 +76,5 @@ sdk/R/ArvadosR.Rproj
 *.Rd
 lib/dispatchcloud/test/sshkey_*
 *.asc
+sdk/java-v2/build.gradle
+sdk/java-v2/settings.gradle
index 7d7e1fc8abf9171df1d905c22478eabf23236299..842975adb0e7d1dc052535cce7937f82a1d75417 100755 (executable)
@@ -9,7 +9,7 @@ function usage {
     echo >&2
     echo >&2 "$0 options:"
     echo >&2 "  -t, --tags                    version tag for docker"
-    echo >&2 "  -r, --repo                    Arvados package repot to use: dev, testing, stable (default: dev)"
+    echo >&2 "  -r, --repo                    Arvados package repo to use: dev (default), testing, stable"
     echo >&2 "  -u, --upload                  Upload the images (docker push)"
     echo >&2 "  --no-cache                    Don't use build cache"
     echo >&2 "  -h, --help                    Display this help and exit"
@@ -22,7 +22,7 @@ REPO=dev
 
 # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
 TEMP=`getopt -o hut:r: \
-    --long help,upload,no-cache,tags,repo: \
+    --long help,upload,no-cache,tags:,repo: \
     -n "$0" -- "$@"`
 
 if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
index 1daceff2393537485d0dd51f381648076f43d512..01a6a06c14afffa2806673a3c7ac0f98d8009ab5 100755 (executable)
@@ -352,6 +352,15 @@ handle_rails_package() {
     if  [[ "$pkgname" != "arvados-workbench" ]]; then
       exclude_list+=('config/database.yml')
     fi
+    # for arvados-api-server, we need to dereference the
+    # config/config.default.yml file. There is no fpm way to do that, sadly
+    # (excluding the existing symlink and then adding the file from its source
+    # path doesn't work, sadly.
+    if [[ "$pkgname" == "arvados-api-server" ]]; then
+      mv /arvados/services/api/config/config.default.yml /arvados/services/api/config/config.default.yml.bu
+      cp -p /arvados/lib/config/config.default.yml /arvados/services/api/config/
+      exclude_list+=('config/config.default.yml.bu')
+    fi
     for exclude in ${exclude_list[@]}; do
         switches+=(-x "$exclude_root/$exclude")
     done
@@ -359,6 +368,11 @@ handle_rails_package() {
               -x "$exclude_root/vendor/cache-*" \
               -x "$exclude_root/vendor/bundle" "$@" "$license_arg"
     rm -rf "$scripts_dir"
+    # Undo the deferencing we did above
+    if [[ "$pkgname" == "arvados-api-server" ]]; then
+      rm -f /arvados/services/api/config/config.default.yml
+      mv /arvados/services/api/config/config.default.yml.bu /arvados/services/api/config/config.default.yml
+    fi
 }
 
 # Build python packages with a virtualenv built-in
index 5ca3b8e42599dea5a0a5db952becfe6e51743849..68fc15183e5a55cb488cab48c7f9a83367f6c075 100755 (executable)
@@ -122,6 +122,7 @@ sdk/go/stats
 sdk/go/crunchrunner
 sdk/cwl
 sdk/R
+sdk/java-v2
 tools/sync-groups
 tools/crunchstat-summary
 tools/crunchstat-summary:py3
@@ -394,7 +395,7 @@ start_services() {
         return 0
     fi
     . "$VENVDIR/bin/activate"
-    echo 'Starting API, keepproxy, keep-web, ws, arv-git-httpd, and nginx ssl proxy...'
+    echo 'Starting API, controller, keepproxy, keep-web, arv-git-httpd, ws, and nginx ssl proxy...'
     if [[ ! -d "$WORKSPACE/services/api/log" ]]; then
        mkdir -p "$WORKSPACE/services/api/log"
     fi
@@ -749,6 +750,7 @@ do_test_once() {
     title "test $1"
     timer_reset
 
+    result=
     if which deactivate >/dev/null; then deactivate; fi
     if ! . "$VENVDIR/bin/activate"
     then
@@ -821,6 +823,7 @@ do_install_once() {
     title "install $1"
     timer_reset
 
+    result=
     if which deactivate >/dev/null; then deactivate; fi
     if [[ "$1" != "env" ]] && ! . "$VENVDIR/bin/activate"; then
         result=1
@@ -955,6 +958,7 @@ install_services/api() {
             || return 1
 
     cd "$WORKSPACE/services/api" \
+        && RAILS_ENV=test bundle exec rails db:environment:set \
         && RAILS_ENV=test bundle exec rake db:drop \
         && RAILS_ENV=test bundle exec rake db:setup \
         && RAILS_ENV=test bundle exec rake db:fixtures:load
@@ -971,6 +975,7 @@ pythonstuff=(
     services/fuse
     services/nodemanager
     tools/crunchstat-summary
+    tools/crunchstat-summary:py3
 )
 
 declare -a gostuff
@@ -1046,7 +1051,11 @@ test_gofmt() {
 test_services/api() {
     rm -f "$WORKSPACE/services/api/git-commit.version"
     cd "$WORKSPACE/services/api" \
+<<<<<<< HEAD
         && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec bin/rails test TESTOPTS='-v -d' ${testargs[services/api]}
+=======
+        && env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} bundle exec rake test TESTOPTS='-v -d' ${testargs[services/api]}
+>>>>>>> master
 }
 
 test_sdk/ruby() {
@@ -1067,6 +1076,10 @@ test_sdk/cli() {
         && KEEP_LOCAL_STORE=/tmp/keep bundle exec rake test TESTOPTS=-v ${testargs[sdk/cli]}
 }
 
+test_sdk/java-v2() {
+    cd "$WORKSPACE/sdk/java-v2" && gradle test
+}
+
 test_services/login-sync() {
     cd "$WORKSPACE/services/login-sync" \
         && bundle exec rake test TESTOPTS=-v ${testargs[services/login-sync]}
@@ -1164,6 +1177,7 @@ test_all() {
     do_test sdk/R
     do_test sdk/cli
     do_test services/login-sync
+    do_test sdk/java-v2
     do_test services/nodemanager_integration
     for p in "${pythonstuff[@]}"
     do
index 9deca3a28cf8cc8c6911097aee68f01426d86177..f1aa3bfce87495ced721ef499a1417afcb6c4eca 100644 (file)
@@ -6,7 +6,7 @@
 require "rubygems"
 require "colorize"
 
-task :generate => [ :realclean, 'sdk/python/arvados/index.html', 'sdk/R/arvados/index.html' ] do
+task :generate => [ :realclean, 'sdk/python/arvados/index.html', 'sdk/R/arvados/index.html', 'sdk/java-v2/javadoc/index.html' ] do
   vars = ['baseurl', 'arvados_cluster_uuid', 'arvados_api_host', 'arvados_workbench_host']
   vars.each do |v|
     if ENV[v]
@@ -87,6 +87,27 @@ EOF
   end
 end
 
+file "sdk/java-v2/javadoc/index.html" do |t|
+  `which java`
+  if $? == 0
+    `which gradle`
+    if $? != 0
+      puts "Warning: gradle not found, java sdk documentation will not be generated".colorize(:light_red)
+    else
+      tgt = Dir.pwd
+      docfiles = []
+      Dir.chdir("../sdk/java-v2") do
+        STDERR.puts `gradle javadoc 2>&1`
+        raise if $? != 0
+      end
+      cp_r("../sdk/java-v2/build/docs/javadoc", "sdk/java-v2")
+      raise if $? != 0
+    end
+  else
+    puts "Warning: java not found, java sdk documentation will not be generated".colorize(:light_red)
+  end
+end
+
 task :linkchecker => [ :generate ] do
   Dir.chdir(".site") do
     `which linkchecker`
@@ -101,6 +122,7 @@ end
 task :clean do
   rm_rf "sdk/python/arvados"
   rm_rf "sdk/R"
+  rm_rf "sdk/java-v2/javadoc"
 end
 
 require "zenweb/tasks"
index a5b53442ca1848118a8065342b78ebe4460ee31c..cfae671929c0c69a7f63672ee584348d751a3975 100644 (file)
@@ -104,7 +104,11 @@ navbar:
     - Ruby:
       - sdk/ruby/index.html.textile.liquid
       - sdk/ruby/example.html.textile.liquid
-    - Java:
+    - Java v2:
+      - sdk/java-v2/index.html.textile.liquid
+      - sdk/java-v2/example.html.textile.liquid
+      - sdk/java-v2/javadoc.html.textile.liquid
+    - Java v1:
       - sdk/java/index.html.textile.liquid
       - sdk/java/example.html.textile.liquid
   api:
@@ -154,8 +158,11 @@ navbar:
   admin:
     - Topics:
       - admin/index.html.textile.liquid
+    - Configuration:
+      - admin/config.html.textile.liquid
     - Upgrading and migrations:
       - admin/upgrading.html.textile.liquid
+      - admin/config-migration.html.textile.liquid
       - install/migrate-docker19.html.textile.liquid
       - admin/upgrade-crunch2.html.textile.liquid
     - Users and Groups:
@@ -174,6 +181,8 @@ navbar:
     - Other:
       - admin/collection-versioning.html.textile.liquid
       - admin/federation.html.textile.liquid
+      - admin/controlling-container-reuse.html.textile.liquid
+      - admin/logs-table-management.html.textile.liquid
   installguide:
     - Overview:
       - install/index.html.textile.liquid
diff --git a/doc/_includes/_config_default_yml.liquid b/doc/_includes/_config_default_yml.liquid
new file mode 120000 (symlink)
index 0000000..457d6fa
--- /dev/null
@@ -0,0 +1 @@
+../../lib/config/config.default.yml
\ No newline at end of file
diff --git a/doc/admin/config-migration.html.textile.liquid b/doc/admin/config-migration.html.textile.liquid
new file mode 100644 (file)
index 0000000..11546c0
--- /dev/null
@@ -0,0 +1,50 @@
+---
+layout: default
+navsection: admin
+title: Migrating Configuration
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados is migrating to a centralized configuration file for all components.  The centralized Arvados configuration is @/etc/arvados/config.yml@.  Components that support the new centralized configuration are listed below.  Components not listed here do not yet support centralized configuration.  During the migration period, legacy configuration files will continue to be loaded and take precedence over the centralized configuration file.
+
+h2. API server
+
+The legacy API server configuration is stored in @config/application.yml@ and @config/database.yml@.  After migration to @/etc/arvados/config.yml@, both of these files should be moved out of the way and/or deleted.
+
+Change to the API server directory and use the following commands:
+
+<pre>
+$ bundle exec rake config:migrate > config.yml
+$ cp config.yml /etc/arvados/config.yml
+</pre>
+
+This will print the contents of @config.yml@ after merging with legacy @application.yml@.  It may then be redirected to a file and copied to @/etc/arvados/config.yml@.
+
+If you wish to update @config.yml@ configuration by hand, or check that everything has been migrated, use @config:diff@ to print configuration items that differ between @application.yml@ and the system @config.yml@.
+
+<pre>
+$ bundle exec rake config:diff
+</pre>
+
+This command will also report if no migrations are required.
+
+h2. crunch-dispatch-slurm
+
+Currently only reads @InstanceTypes@ from centralized configuration.  Still requires component-specific configuration file.
+
+h2. keepstore
+
+Currently only reads @RemoteClusters@ from centralized configuration.  Still requires component-specific configuration file.
+
+h2. arvados-controller
+
+Only supports centralized config file.  No migration needed.
+
+h2. arvados-dispatch-cloud
+
+Only supports centralized config file.  No migration needed.
diff --git a/doc/admin/config.html.textile.liquid b/doc/admin/config.html.textile.liquid
new file mode 100644 (file)
index 0000000..a1dcdb3
--- /dev/null
@@ -0,0 +1,19 @@
+---
+layout: default
+navsection: admin
+title: Configuration reference
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The master Arvados configuration is stored at @/etc/arvados/config.yml@
+
+See "Migrating Configuration":config-migration.html for information about migrating from legacy component-specific configuration files.
+
+{% codeblock as yaml %}
+{% include 'config_default_yml' %}
+{% endcodeblock %}
diff --git a/doc/admin/controlling-container-reuse.html.textile.liquid b/doc/admin/controlling-container-reuse.html.textile.liquid
new file mode 100644 (file)
index 0000000..76f57f3
--- /dev/null
@@ -0,0 +1,21 @@
+---
+layout: default
+navsection: admin
+title: Controlling container reuse
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page describes how an admin can control container reuse using the @arv@ command. This can be utilized to avoid reusing a completed container without disabling reuse for the corresponding steps in affected workflows. For example, if a container exited successfully but produced bad output, it may not be feasible to update the workflow immediately. Meanwhile, changing the state of the container from @Complete@ to @Cancelled@ will prevent it from being used in subsequent workflows.
+
+If a container is in the @Complete@ state, the following @arv@ command will change its state to @Cancelled@, where @xxxxx-xxxxx-xxxxxxxxxxxxxxx@ is the @UUID@ of the container:
+
+<pre>arv container update -u xxxxx-xxxxx-xxxxxxxxxxxxxxx -c '{"state":"Cancelled"}'</pre>
+
+Use the following command to list all containers that exited with 0 and were then cancelled:
+
+<pre>arv container list --filters='[["state", "=", "Cancelled"], ["exit_code", "=", 0]]'</pre>See the "arv CLI tool overview":{{site.baseurl}}/sdk/cli/index.html for more details about using the @arv@ command.
diff --git a/doc/admin/logs-table-management.html.textile.liquid b/doc/admin/logs-table-management.html.textile.liquid
new file mode 100644 (file)
index 0000000..dedd960
--- /dev/null
@@ -0,0 +1,55 @@
+---
+layout: default
+navsection: admin
+title: "Logs table management"
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page aims to provide insight about managing the ever growing API Server's logs table.
+
+h3. Logs table purpose & behavior
+
+This database table currently serves three purposes:
+* It's an audit log, permitting admins and users to look up the time and details of past changes to Arvados objects via @arvados.v1.logs.*@ endpoints.
+* It's a mechanism for passing cache-invalidation events, used by websocket servers, the Python SDK "events" library, and @arvados-cwl-runner@ to detect when an object has changed.
+* It's a staging area for stdout/stderr text coming from users' containers, permitting users to see what their containers are doing while they are still running (i.e., before those text files are written to Keep).
+
+As a result, this table grows indefinitely, even on sites where policy does not require an audit log; making backups, migrations, and upgrades unnecessarily slow and painful.
+
+h3. API Server configuration
+
+To solve the problem mentioned above, the API server offers the possibility to limit the amount of log information stored on the table:
+
+<pre>
+# Time to keep audit logs (a row in the log table added each time an
+# Arvados object is created, modified, or deleted) in the PostgreSQL
+# database. Currently, websocket event notifications rely on audit
+# logs, so this should not be set lower than 600 (10 minutes).
+max_audit_log_age: 1209600
+</pre>
+
+...and, to prevent surprises and avoid bad database behavior (especially the first time the cleanup job runs on an existing cluster with a huge backlog), a maximum number of rows to delete in a single transaction.
+
+<pre>
+# Maximum number of log rows to delete in a single SQL transaction.
+#
+# If max_audit_log_delete_batch is 0, log entries will never be
+# deleted by Arvados. Cleanup can be done by an external process
+# without affecting any Arvados system processes, as long as very
+# recent (<5 minutes old) logs are not deleted.
+#
+# 100000 is a reasonable batch size for most sites.
+max_audit_log_delete_batch: 0
+</pre>
+
+This feature works when both settings are non-zero, periodically dispatching a background task that deletes all log rows older than @max_audit_log_age@.
+The events being cleaned up by this process don't include job/container stderr logs (they're handled by the existing @delete job/container logs@ rake tasks).
+
+h3. Additional consideration
+
+Depending on the local installation's audit requirements, the cluster admins should plan for an external backup procedure before enabling this feature, as this information is not replicated anywhere else.
index 6e2e6cba6dfeb1873d8a58049e1cdb65ac801bdd..09bef2a62acd18c5f2d0b02ef022248e50033956 100644 (file)
@@ -125,6 +125,10 @@ h4. Centos7 package for libpam-arvados depends on the python-pam package, which
 
 As part of story "#9945":https://dev.arvados.org/issues/9945, it was discovered that the Centos7 package for libpam-arvados was missing a dependency on the python-pam package, which is available from the EPEL repository. The dependency has been added to the libpam-arvados package. This means that going forward, the EPEL repository will need to be enabled to install libpam-arvados on Centos7.
 
+h4. New configuration
+
+Arvados is migrating to a centralized configuration file for all components.  During the migration, legacy configuration files will continue to be loaded.  See "Migrating Configuration":config-migration.html for details.
+
 h3. v1.3.0 (2018-12-05)
 
 This release includes several database migrations, which will be executed automatically as part of the API server upgrade. On large Arvados installations, these migrations will take a while. We've seen the upgrade take 30 minutes or more on installations with a lot of collections.
index f0ce8e362f40ee0c533829b3c04adf7afa6ccf88..d59c66edc3cbf5f492dbb0c4befac668e209c980 100644 (file)
@@ -65,9 +65,11 @@ table(table table-bordered table-condensed).
 |Queued|Waiting for a dispatcher to lock it and try to run the container.|Locked, Cancelled|
 |Locked|A dispatcher has "taken" the container and is allocating resources for it. The container has not started yet.|Queued, Running, Cancelled|
 |Running|Resources have been allocated and the contained process has been started (or is about to start). Crunch-run _must_ set state to Running _before_ there is any possibility that user code will run in the container.|Complete, Cancelled|
-|Complete|Container was running, and the contained process/command has exited.|-|
+|Complete|Container was running, and the contained process/command has exited.|Cancelled|
 |Cancelled|The container did not run long enough to produce an exit code. This includes cases where the container didn't even start, cases where the container was interrupted/killed before it exited by itself (e.g., priority changed to 0), and cases where some problem prevented the system from capturing the contained process's exit status (exit code and output).|-|
 
+See "Controlling container reuse":{{site.baseurl}}/admin/controlling-container-reuse.html for details about changing state from @Complete@ to @Cancelled@
+
 h2(#mount_types). {% include 'mount_types' %}
 
 h2(#runtime_constraints). {% include 'container_runtime_constraints' %}
index 2d94d32ac5edbb844eaa9dca37c18d49aef7a689..64cc9c6f89edd737f72b77b66d47f0cb623d3fe7 100644 (file)
@@ -29,29 +29,32 @@ h2. Usage
 
 <pre>
 $ arvbox
-Arvados-in-a-box                      http://arvados.org
-
-start|run <config> [tag]  start arvbox container
-stop       stop arvbox container
-restart <config>  stop, then run again
-status     print some information about current arvbox
-ip         print arvbox docker container ip address
-host       print arvbox published host
-shell      enter arvbox shell
-open       open arvbox workbench in a web browser
-root-cert  get copy of root certificate
-update  <config> stop, pull latest image, run
-build   <config> build arvbox Docker image
-reboot  <config> stop, build arvbox Docker image, run
-rebuild <config> build arvbox Docker image, no layer cache
-reset      delete arvbox arvados data (be careful!)
-destroy    delete all arvbox code and data (be careful!)
-log <service> tail log of specified service
-ls <options>  list directories inside arvbox
-cat <files>   get contents of files inside arvbox
-pipe       run a bash script piped in from stdin
-sv <start|stop|restart> <service> change state of service inside arvbox
-clone <from> <to>   clone an arvbox
+Arvados-in-a-box             https://doc.arvados.org/install/arvbox.html
+
+start|run <config> [tag]   start arvbox container
+stop               stop arvbox container
+restart <config>   stop, then run again
+status             print some information about current arvbox
+ip                 print arvbox docker container ip address
+host               print arvbox published host
+shell              enter shell as root
+ashell             enter shell as 'arvbox'
+psql               enter postgres console
+open               open arvbox workbench in a web browser
+root-cert          get copy of root certificate
+update  <config>   stop, pull latest image, run
+build   <config>   build arvbox Docker image
+reboot  <config>   stop, build arvbox Docker image, run
+rebuild <config>   build arvbox Docker image, no layer cache
+reset              delete arvbox arvados data (be careful!)
+destroy            delete all arvbox code and data (be careful!)
+log <service>      tail log of specified service
+ls <options>       list directories inside arvbox
+cat <files>        get contents of files inside arvbox
+pipe               run a bash script piped in from stdin
+sv <start|stop|restart> <service>
+                   change state of service inside arvbox
+clone <from> <to>  clone dev arvbox
 </pre>
 
 h2. Install root certificate
@@ -85,7 +88,11 @@ Demo configuration.  Boots a complete Arvados environment inside the container.
 
 h3. test
 
-Run the test suite.
+Starts postgres and initializes the API server, then runs the Arvados test suite.  Will pass command line arguments to test runner.  Supports test runner interactive mode.
+
+h3. devenv
+
+Starts a minimal container with no services and the host's $HOME bind mounted inside the container, then enters an interactive login shell.  Intended to make it convenient to use tools installed in arvbox that don't require services.
 
 h3. publicdev
 
index 68bf07a4ae50020a40e73efffddb876c216d1607..4a35f448e2996dfe8134a674af3cd422752e5ee2 100644 (file)
@@ -81,11 +81,11 @@ Client:
   AuthToken: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
 KeepServiceTypes:
   - disk
-Listen: :9005
 ManagementToken: <span class="userinput">xyzzy</span>
 RunPeriod: 10m
 CollectionBatchSize: 100000
 CollectionBuffers: 1000
+LostBlocksFile: /tmp/keep-balance-lost-blocks.txt    # If given, this file will be updated atomically during each successful run.
 </code></pre>
 </notextile>
 
index defec2589e82a3f32266f39e500c54401ee57683..770527da1fe4096af877a67363ee4d6e369a0a7d 100644 (file)
@@ -556,7 +556,7 @@ subscription_id = 00000000-0000-0000-0000-000000000000
 # https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal/
 # and updated for v2 of the Azure cli tool.
 #
-# az ad app create --display-name "Node Manager" --homepage "https://arvados.org" --identifier-uris "https://<Your_Application_Uri>" --password <Your_Password>
+# az ad app create --display-name "Node Manager" --homepage "https://arvados.org" --identifier-uris "https://<Your_Application_Uri>" --password <Your_Password> --end-date <Desired_credential_expiry_date>
 # az ad sp create "<Application_Id>"
 # az role assignment create --assignee "<Application_Id>" --role Owner --resource-group "<Your_Azure_Arvados_Resource_Group>"
 #
index dbfcaedc71742d56cbfbb2facf500b379ec6b196..8ff5ddc0994537981d83983756f2dc231ba0d769 100644 (file)
@@ -17,6 +17,7 @@ This section documents language bindings for the "Arvados API":{{site.baseurl}}/
 * "R SDK":{{site.baseurl}}/sdk/R/index.html
 * "Perl SDK":{{site.baseurl}}/sdk/perl/index.html
 * "Ruby SDK":{{site.baseurl}}/sdk/ruby/index.html
-* "Java SDK":{{site.baseurl}}/sdk/java/index.html
+* "Java SDK v2":{{site.baseurl}}/sdk/java-v2/index.html
+* "Java SDK v1":{{site.baseurl}}/sdk/java/index.html
 
 Many Arvados Workbench pages, under the the *Advanced* tab, provide examples of API and SDK use for accessing the current resource .
diff --git a/doc/sdk/java-v2/example.html.textile.liquid b/doc/sdk/java-v2/example.html.textile.liquid
new file mode 100644 (file)
index 0000000..e73f968
--- /dev/null
@@ -0,0 +1,49 @@
+---
+layout: default
+navsection: sdk
+navmenu: Java SDK v2
+title: Examples
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+In these examples, the site prefix is @aaaaa@.
+
+h2.  Initialize SDK
+
+{% codeblock as java %}
+package org.arvados.example;
+
+import java.util.List;
+import org.arvados.client.config.ConfigProvider;
+import org.arvados.client.config.ExternalConfigProvider;
+import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.api.client.CollectionsApiClient;
+
+public class CollectionExample {
+    public static void main(String[] argv) {
+       ConfigProvider conf = ExternalConfigProvider.builder().
+           apiProtocol("https").
+           apiHost("qr1hi.arvadosapi.com").
+           apiPort(443).
+           apiToken("...").
+           build();
+       CollectionsApiClient collectionsApi = new CollectionsApiClient(conf);
+       /* ... */
+    }
+}
+{% endcodeblock %}
+
+h2. list
+
+{% codeblock as java %}
+       CollectionList cl = collectionsApi.list();
+       List<Collection> items = cl.getItems();
+       for (int i = 0; i < items.size(); i++) {
+           System.out.println(items.get(i));
+       }
+{% endcodeblock %}
diff --git a/doc/sdk/java-v2/index.html.textile.liquid b/doc/sdk/java-v2/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..0a8953a
--- /dev/null
@@ -0,0 +1,146 @@
+---
+layout: default
+navsection: sdk
+navmenu: Java SDK v2
+title: "Installation"
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados Java SDK v2 provides a high level API for working with Arvados resources.
+
+h2. Using the SDK
+
+The SDK is packaged as a JAR named @arvados-java-<version>.jar@, which is published to Maven Central and can be included using Maven, Gradle, or by hand.
+
+Here is an example @build.gradle@ file that uses the Arvados Java SDK:
+
+<pre>
+apply plugin: 'application'
+apply plugin: 'java-library'
+apply plugin: 'maven'
+
+repositories {
+    mavenCentral()
+}
+
+application {
+    mainClassName = "org.arvados.example.CollectionExample"
+}
+
+dependencies {
+    api 'org.arvados:arvados-java-sdk:0.1.0'
+}
+</pre>
+
+See "Java SDK Examples":example.html to get started using the SDK.
+
+h3. Logging
+
+The SDK uses the SLF4J facade library for logging. A concrete logging "binding":https://www.slf4j.org/manual.html#swapping (and configuration, if required) must be provided by a client. For small applications, you can use the Simple implementation by adding slf4j-simple-1.8.0-beta4.jar to your classpath.
+
+h3. Configuration
+
+"TypeSafe Configuration":https://github.com/lightbend/config is used for configuring this library.
+
+Please review src/main/resources/reference.conf for default values provided with this library.
+
+* **keepweb-host** - host of your Keep-Web server (default: localhost)
+* **keepweb-port** - port of your Keep-Web server (default: 8000)
+* **host** - host of your Arvados API server
+* **port** - port of your Arvados API server
+* **token** - Arvados token to authenticate registered user, one must provide "token obtained from Arvados Workbench":https://doc.arvados.org/user/reference/api-tokens.html
+* **protocol** - don't change unless really needed (default: https)
+* **host-insecure** - ignores SSL certificate verification if true (default: false; don't change to *true* unless really needed)
+* **split-size** - size of chunk files in megabytes (default: 64)
+* **temp-dir** - temporary chunk files storage
+* **copies** - amount of chunk files duplicates per Keep server
+* **retries** - UNIMPLEMENTED
+
+In order to override default settings one can create an application.conf file in an application.  Example: src/test/resources/application.conf.
+
+Alternatively @ExternalConfigProvider@ class can be used to pass configuration via code.  @ExternalConfigProvider@ comes with a builder and all of the above values must be provided in order for it to work properly.
+
+@ArvadosFacade@ has two constructors, one without arguments that uses values from application.conf and second one taking @ExternalConfigProvider@ as an argument.
+
+h3. API clients
+
+All API clients inherit from @BaseStandardApiClient@. This class contains implementation of all common methods as described in "Arvados Common Resource Methods":http://doc.arvados.org/api/methods.html.
+
+Parameters provided to common or specific methods are String UUID or fields wrapped in Java objects. For example:
+
+{% codeblock as java %}
+String uuid = "ardev-4zz18-rxcql7qwyakg1r1";
+
+Collection actual = client.get(uuid);
+{% endcodeblock %}
+
+{% codeblock as java %}
+ListArgument listArgument = ListArgument.builder()
+        .filters(Arrays.asList(
+                Filter.of("owner_uuid", Operator.LIKE, "ardev%"),
+                Filter.of("name", Operator.LIKE, "Super%"),
+                Filter.of("portable_data_hash", Operator.IN, Lists.newArrayList("54f6d9f59065d3c009d4306660989379+65")
+            )))
+        .build();
+
+CollectionList actual = client.list(listArgument);
+{% endcodeblock %}
+
+Non-standard API clients must inherit from BaseApiClient. For example: KeepServerApiClient communicates directly with Keep servers using exclusively non-common methods.
+
+h3. Business logic
+
+More advanced API data handling could be implemented as *Facade* classes. In current version functionalities provided by SDK are handled by @ArvadosFacade@. They include:
+
+* **downloading single file from collection** - using Keep-Web
+* **downloading whole collection** - using Keep-Web or Keep Server API
+* **listing file info from certain collection** - information is returned as list of *FileTokens* providing file details
+* **uploading single file** - to either new or existing collection
+* **uploading list of files** - to either new or existing collection
+* **creating an empty collection**
+* **getting current user info**
+* **listing current user's collections**
+* **creating new project**
+* **deleting certain collection**
+
+h3. Note regarding Keep-Web
+
+The Java SDK requires Keep Web (which is part of the standard configuration) as well as the API server and Keep server(s).
+
+h3. Integration tests
+
+In order to run the integration tests, all fields within following configuration file must be provided: @src/test/resources/integration-test-appliation.conf@
+
+
+The parameter @integration-tests.project-uuid@ should contain UUID of one project available to user whose token was provided within configuration file.
+
+Integration tests require connection to a real Arvados server.
+
+h3. Note regarding file naming
+
+When uploading via the current implementation of the Java SDK all uploaded files within single collection must have different names. This applies also to uploading files to already existing collection. Renaming files with duplicate names is not currently implemented.
+
+h3. Javadoc
+
+See "Javadoc":javadoc.html
+
+h2. Building the Arvados SDK
+
+Dependencies:
+* JDK for Java 8 or later "https://www.oracle.com/technetwork/java/javase/downloads/index.html":https://www.oracle.com/technetwork/java/javase/downloads/index.html
+* Gradle "https://gradle.org/install/":https://gradle.org/install/
+
+
+<notextile>
+<pre>
+$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+$ <code class="userinput">cd arvados/sdk/java-v2</code>
+$ <code class="userinput">gradle test</code>
+$ <code class="userinput">gradle jar</code>
+</pre>
+This will build the SDK and run all unit tests, then generate an Arvados Java sdk jar file in build/libs/arvados-java-2.0.0.jar
+</notextile>
diff --git a/doc/sdk/java-v2/javadoc.html.textile.liquid b/doc/sdk/java-v2/javadoc.html.textile.liquid
new file mode 100644 (file)
index 0000000..872150f
--- /dev/null
@@ -0,0 +1,15 @@
+---
+layout: default
+navsection: sdk
+navmenu: Java SDK v2
+title: "Javadoc Reference"
+
+no_nav_left: true
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+notextile. <iframe src="javadoc/index.html" style="width:100%; height:100%; border:none" />
index 6099d7f8610c572b46caebb557aada5afd012389..111c0631d0592b1f9a0c62e3bfd899fdb1186dc4 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: Java
+navmenu: Java SDK v1
 title: "Installation"
 ...
 {% comment %}
@@ -10,7 +10,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-The Java SDK provides a generic set of wrappers so you can make API calls in java.
+The Java SDK v1 provides a low level API to call Arvados from Java.
 
 h3. Introdution
 
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
new file mode 100644 (file)
index 0000000..bea6387
--- /dev/null
@@ -0,0 +1,455 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
+#
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.default.yml
+
+Clusters:
+  xxxxx:
+    SystemRootToken: ""
+
+    # Token to be included in all healthcheck requests. Disabled by default.
+    # Server expects request header of the format "Authorization: Bearer xxx"
+    ManagementToken: ""
+
+    Services:
+      RailsAPI:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Controller:
+        InternalURLs: {}
+        ExternalURL: ""
+      Websocket:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepbalance:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      GitSSH:
+        ExternalURL: ""
+      DispatchCloud:
+        InternalURLs: {}
+      SSO:
+        ExternalURL: ""
+      Keepproxy:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAV:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAVDownload:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Composer:
+        ExternalURL: ""
+      WebShell:
+        ExternalURL: ""
+      Workbench1:
+        InternalURLs: {}
+        ExternalURL: ""
+      Workbench2:
+        ExternalURL: ""
+    PostgreSQL:
+      # max concurrent connections per arvados server daemon
+      ConnectionPool: 32
+      Connection:
+        # All parameters here are passed to the PG client library in a connection string;
+        # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+        Host: ""
+        Port: 0
+        User: ""
+        Password: ""
+        DBName: ""
+    API:
+      # Maximum size (in bytes) allowed for a single API request.  This
+      # limit is published in the discovery document for use by clients.
+      # Note: You must separately configure the upstream web server or
+      # proxy to actually enforce the desired maximum request size on the
+      # server side.
+      MaxRequestSize: 134217728
+
+      # Limit the number of bytes read from the database during an index
+      # request (by retrieving and returning fewer rows than would
+      # normally be returned in a single response).
+      # Note 1: This setting never reduces the number of returned rows to
+      # zero, no matter how big the first data row is.
+      # Note 2: Currently, this is only checked against a specific set of
+      # columns that tend to get large (collections.manifest_text,
+      # containers.mounts, workflows.definition). Other fields (e.g.,
+      # "properties" hashes) are not counted against this limit.
+      MaxIndexDatabaseRead: 134217728
+
+      # Maximum number of items to return when responding to a APIs that
+      # can return partial result sets using limit and offset parameters
+      # (e.g., *.index, groups.contents). If a request specifies a "limit"
+      # parameter higher than this value, this value is used instead.
+      MaxItemsPerResponse: 1000
+
+      # API methods to disable. Disabled methods are not listed in the
+      # discovery document, and respond 404 to all requests.
+      # Example: ["jobs.create", "pipeline_instances.create"]
+      DisabledAPIs: []
+
+      # Interval (seconds) between asynchronous permission view updates. Any
+      # permission-updating API called with the 'async' parameter schedules an
+      # update on the permission view in the future, if not already scheduled.
+      AsyncPermissionsUpdateInterval: 20
+
+      # RailsSessionSecretToken is a string of alphanumeric characters
+      # used by Rails to sign session tokens. IMPORTANT: This is a
+      # site secret. It should be at least 50 characters.
+      RailsSessionSecretToken: ""
+
+    Users:
+      # Config parameters to automatically setup new users.  If enabled,
+      # this users will be able to self-activate.  Enable this if you want
+      # to run an open instance where anyone can create an account and use
+      # the system without requiring manual approval.
+      #
+      # The params AutoSetupNewUsersWith* are meaningful only when AutoSetupNewUsers is turned on.
+      # AutoSetupUsernameBlacklist is a list of usernames to be blacklisted for auto setup.
+      AutoSetupNewUsers: false
+      AutoSetupNewUsersWithVmUUID: ""
+      AutoSetupNewUsersWithRepository: false
+      AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+      # When new_users_are_active is set to true, new users will be active
+      # immediately.  This skips the "self-activate" step which enforces
+      # user agreements.  Should only be enabled for development.
+      NewUsersAreActive: false
+
+      # The e-mail address of the user you would like to become marked as an admin
+      # user on their first login.
+      # In the default configuration, authentication happens through the Arvados SSO
+      # server, which uses OAuth2 against Google's servers, so in that case this
+      # should be an address associated with a Google account.
+      AutoAdminUserWithEmail: ""
+
+      # If auto_admin_first_user is set to true, the first user to log in when no
+      # other admin users exist will automatically become an admin user.
+      AutoAdminFirstUser: false
+
+      # Email address to notify whenever a user creates a profile for the
+      # first time
+      UserProfileNotificationAddress: ""
+      AdminNotifierEmailFrom: arvados@example.com
+      EmailSubjectPrefix: "[ARVADOS] "
+      UserNotifierEmailFrom: arvados@example.com
+      NewUserNotificationRecipients: []
+      NewInactiveUserNotificationRecipients: []
+
+    AuditLogs:
+      # Time to keep audit logs, in seconds. (An audit log is a row added
+      # to the "logs" table in the PostgreSQL database each time an
+      # Arvados object is created, modified, or deleted.)
+      #
+      # Currently, websocket event notifications rely on audit logs, so
+      # this should not be set lower than 600 (10 minutes).
+      MaxAge: 1209600
+
+      # Maximum number of log rows to delete in a single SQL transaction.
+      #
+      # If MaxDeleteBatch is 0, log entries will never be
+      # deleted by Arvados. Cleanup can be done by an external process
+      # without affecting any Arvados system processes, as long as very
+      # recent (<5 minutes old) logs are not deleted.
+      #
+      # 100000 is a reasonable batch size for most sites.
+      MaxDeleteBatch: 0
+
+      # Attributes to suppress in events and audit logs.  Notably,
+      # specifying ["manifest_text"] here typically makes the database
+      # smaller and faster.
+      #
+      # Warning: Using any non-empty value here can have undesirable side
+      # effects for any client or component that relies on event logs.
+      # Use at your own risk.
+      UnloggedAttributes: []
+
+    SystemLogs:
+      # Maximum characters of (JSON-encoded) query parameters to include
+      # in each request log entry. When params exceed this size, they will
+      # be JSON-encoded, truncated to this size, and logged as
+      # params_truncated.
+      MaxRequestLogParamsSize: 2000
+
+    Collections:
+      # Allow clients to create collections by providing a manifest with
+      # unsigned data blob locators. IMPORTANT: This effectively disables
+      # access controls for data stored in Keep: a client who knows a hash
+      # can write a manifest that references the hash, pass it to
+      # collections.create (which will create a permission link), use
+      # collections.get to obtain a signature for that data locator, and
+      # use that signed locator to retrieve the data from Keep. Therefore,
+      # do not turn this on if your users expect to keep data private from
+      # one another!
+      BlobSigning: true
+
+      # BlobSigningKey is a string of alphanumeric characters used to
+      # generate permission signatures for Keep locators. It must be
+      # identical to the permission key given to Keep. IMPORTANT: This is
+      # a site secret. It should be at least 50 characters.
+      #
+      # Modifying BlobSigningKey will invalidate all existing
+      # signatures, which can cause programs to fail (e.g., arv-put,
+      # arv-get, and Crunch jobs).  To avoid errors, rotate keys only when
+      # no such processes are running.
+      BlobSigningKey: ""
+
+      # Default replication level for collections. This is used when a
+      # collection's replication_desired attribute is nil.
+      DefaultReplication: 2
+
+      # Lifetime (in seconds) of blob permission signatures generated by
+      # the API server. This determines how long a client can take (after
+      # retrieving a collection record) to retrieve the collection data
+      # from Keep. If the client needs more time than that (assuming the
+      # collection still has the same content and the relevant user/token
+      # still has permission) the client can retrieve the collection again
+      # to get fresh signatures.
+      #
+      # This must be exactly equal to the -blob-signature-ttl flag used by
+      # keepstore servers.  Otherwise, reading data blocks and saving
+      # collections will fail with HTTP 403 permission errors.
+      #
+      # Modifying BlobSigningTTL invalidates existing signatures; see
+      # BlobSigningKey note above.
+      #
+      # The default is 2 weeks.
+      BlobSigningTTL: 1209600
+
+      # Default lifetime for ephemeral collections: 2 weeks. This must not
+      # be less than BlobSigningTTL.
+      DefaultTrashLifetime: 1209600
+
+      # Interval (seconds) between trash sweeps. During a trash sweep,
+      # collections are marked as trash if their trash_at time has
+      # arrived, and deleted if their delete_at time has arrived.
+      TrashSweepInterval: 60
+
+      # If true, enable collection versioning.
+      # When a collection's preserve_version field is true or the current version
+      # is older than the amount of seconds defined on preserve_version_if_idle,
+      # a snapshot of the collection's previous state is created and linked to
+      # the current collection.
+      CollectionVersioning: false
+
+      #   0 = auto-create a new version on every update.
+      #  -1 = never auto-create new versions.
+      # > 0 = auto-create a new version when older than the specified number of seconds.
+      PreserveVersionIfIdle: -1
+
+    Login:
+      # These settings are provided by your OAuth2 provider (e.g.,
+      # sso-provider).
+      ProviderAppSecret: ""
+      ProviderAppID: ""
+
+    Git:
+      # Git repositories must be readable by api server, or you won't be
+      # able to submit crunch jobs. To pass the test suites, put a clone
+      # of the arvados tree in {git_repositories_dir}/arvados.git or
+      # {git_repositories_dir}/arvados/.git
+      Repositories: /var/lib/arvados/git/repositories
+
+    TLS:
+      Insecure: false
+
+    Containers:
+      # List of supported Docker Registry image formats that compute nodes
+      # are able to use. `arv keep docker` will error out if a user tries
+      # to store an image with an unsupported format. Use an empty array
+      # to skip the compatibility check (and display a warning message to
+      # that effect).
+      #
+      # Example for sites running docker < 1.10: ["v1"]
+      # Example for sites running docker >= 1.10: ["v2"]
+      # Example for disabling check: []
+      SupportedDockerImageFormats: ["v2"]
+
+      # Include details about job reuse decisions in the server log. This
+      # causes additional database queries to run, so it should not be
+      # enabled unless you expect to examine the resulting logs for
+      # troubleshooting purposes.
+      LogReuseDecisions: false
+
+      # Default value for keep_cache_ram of a container's runtime_constraints.
+      DefaultKeepCacheRAM: 268435456
+
+      # Number of times a container can be unlocked before being
+      # automatically cancelled.
+      MaxDispatchAttempts: 5
+
+      # Default value for container_count_max for container requests.  This is the
+      # number of times Arvados will create a new container to satisfy a container
+      # request.  If a container is cancelled it will retry a new container if
+      # container_count < container_count_max on any container requests associated
+      # with the cancelled container.
+      MaxRetryAttempts: 3
+
+      # The maximum number of compute nodes that can be in use simultaneously
+      # If this limit is reduced, any existing nodes with slot number >= new limit
+      # will not be counted against the new limit. In other words, the new limit
+      # won't be strictly enforced until those nodes with higher slot numbers
+      # go down.
+      MaxComputeVMs: 64
+
+      # Preemptible instance support (e.g. AWS Spot Instances)
+      # When true, child containers will get created with the preemptible
+      # scheduling parameter set.
+      UsePreemptibleInstances: false
+
+      Logging:
+        # When you run the db:delete_old_container_logs task, it will find
+        # containers that have been finished for at least this many seconds,
+        # and delete their stdout, stderr, arv-mount, crunch-run, and
+        # crunchstat logs from the logs table.
+        MaxAge: 720h
+
+        # These two settings control how frequently log events are flushed to the
+        # database.  Log lines are buffered until either crunch_log_bytes_per_event
+        # has been reached or crunch_log_seconds_between_events has elapsed since
+        # the last flush.
+        LogBytesPerEvent: 4096
+        LogSecondsBetweenEvents: 1
+
+        # The sample period for throttling logs, in seconds.
+        LogThrottlePeriod: 60
+
+        # Maximum number of bytes that a job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleBytes: 65536
+
+        # Maximum number of lines that a job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleLines: 1024
+
+        # Maximum bytes that may be logged by a single job.  Log bytes that are
+        # silenced by throttling are not counted against this total.
+        LimitLogBytesPerJob: 67108864
+
+        LogPartialLineThrottlePeriod: 5
+
+        # Container logs are written to Keep and saved in a collection,
+        # which is updated periodically while the container runs.  This
+        # value sets the interval (given in seconds) between collection
+        # updates.
+        LogUpdatePeriod: 1800
+
+        # The log collection is also updated when the specified amount of
+        # log data (given in bytes) is produced in less than one update
+        # period.
+        LogUpdateSize: 33554432
+
+      SLURM:
+        Managed:
+          # Path to dns server configuration directory
+          # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+          # files or touch restart.txt (see below).
+          DNSServerConfDir: ""
+
+          # Template file for the dns server host snippets. See
+          # unbound.template in this directory for an example. If false, do
+          # not write any config files.
+          DNSServerConfTemplate: ""
+
+          # String to write to {dns_server_conf_dir}/restart.txt (with a
+          # trailing newline) after updating local data. If false, do not
+          # open or write the restart.txt file.
+          DNSServerReloadCommand: ""
+
+          # Command to run after each DNS update. Template variables will be
+          # substituted; see the "unbound" example below. If false, do not run
+          # a command.
+          DNSServerUpdateCommand: ""
+
+          ComputeNodeDomain: ""
+          ComputeNodeNameservers:
+            - 192.168.1.1
+
+          # Hostname to assign to a compute node when it sends a "ping" and the
+          # hostname in its Node record is nil.
+          # During bootstrapping, the "ping" script is expected to notice the
+          # hostname given in the ping response, and update its unix hostname
+          # accordingly.
+          # If false, leave the hostname alone (this is appropriate if your compute
+          # nodes' hostnames are already assigned by some other mechanism).
+          #
+          # One way or another, the hostnames of your node records should agree
+          # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+          #
+          # Example for compute0000, compute0001, ....:
+          # assign_node_hostname: compute%<slot_number>04d
+          # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+          AssignNodeHostname: "compute%<slot_number>d"
+
+      JobsAPI:
+        # Enable the legacy Jobs API.  This value must be a string.
+        # 'auto' -- (default) enable the Jobs API only if it has been used before
+        #         (i.e., there are job records in the database)
+        # 'true' -- enable the Jobs API despite lack of existing records.
+        # 'false' -- disable the Jobs API despite presence of existing records.
+        Enable: 'auto'
+
+        # Git repositories must be readable by api server, or you won't be
+        # able to submit crunch jobs. To pass the test suites, put a clone
+        # of the arvados tree in {git_repositories_dir}/arvados.git or
+        # {git_repositories_dir}/arvados/.git
+        GitInternalDir: /var/lib/arvados/internal.git
+
+        # Docker image to be used when none found in runtime_constraints of a job
+        DefaultDockerImage: ""
+
+        # none or slurm_immediate
+        CrunchJobWrapper: none
+
+        # username, or false = do not set uid when running jobs.
+        CrunchJobUser: crunch
+
+        # The web service must be able to create/write this file, and
+        # crunch-job must be able to stat() it.
+        CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+        # Control job reuse behavior when two completed jobs match the
+        # search criteria and have different outputs.
+        #
+        # If true, in case of a conflict, reuse the earliest job (this is
+        # similar to container reuse behavior).
+        #
+        # If false, in case of a conflict, do not reuse any completed job,
+        # but do reuse an already-running job if available (this is the
+        # original job reuse behavior, and is still the default).
+        ReuseJobIfOutputsDiffer: false
+
+    Mail:
+      MailchimpAPIKey: ""
+      MailchimpListID: ""
+      SendUserSetupNotificationEmail: ""
+      IssueReporterEmailFrom: ""
+      IssueReporterEmailTo: ""
+      SupportEmailAddress: ""
+      EmailFrom: ""
+    RemoteClusters:
+      "*":
+        Proxy: false
+        ActivateUsers: false
index 3c63fe51e6e89a116a40ea5c72917a5d4528ab41..daf7977ad50c5af0380ee44282bf81ff439e4d3b 100644 (file)
@@ -74,6 +74,7 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
                        defer wg.Done()
                        err := cq.Unlock(uuid)
                        c.Check(err, check.NotNil)
+                       c.Check(err, check.ErrorMatches, ".*cannot unlock when Queued.*")
 
                        err = cq.Lock(uuid)
                        c.Check(err, check.IsNil)
@@ -101,9 +102,6 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {
                }()
        }
        wg.Wait()
-
-       err = cq.Cancel(arvadostest.CompletedContainerUUID)
-       c.Check(err, check.ErrorMatches, `.*State cannot change from Complete to Cancelled.*`)
 }
 
 func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) {
index c35842616696f7dc00d047969ea74f27aad332df..eeb44dbd7f232193b6e7102f85e3ae905b71dfc9 100644 (file)
@@ -442,17 +442,16 @@ http://doc.arvados.org/install/install-api-server.html#disable_api_methods
                                               num_retries=self.num_retries)
 
         for k,v in generatemapper.items():
-            if k.startswith("_:"):
-                if v.type == "Directory":
+            if v.type == "Directory" and v.resolved.startswith("_:"):
                     continue
-                if v.type == "CreateFile":
-                    with final.open(v.target, "wb") as f:
-                        f.write(v.resolved.encode("utf-8"))
+            if v.type == "CreateFile" and (k.startswith("_:") or v.resolved.startswith("_:")):
+                with final.open(v.target, "wb") as f:
+                    f.write(v.resolved.encode("utf-8"))
                     continue
 
-            if not k.startswith("keep:"):
+            if not v.resolved.startswith("keep:"):
                 raise Exception("Output source is not in keep or a literal")
-            sp = k.split("/")
+            sp = v.resolved.split("/")
             srccollection = sp[0][5:]
             try:
                 reader = self.collection_cache.get(srccollection)
@@ -462,7 +461,8 @@ http://doc.arvados.org/install/install-api-server.html#disable_api_methods
                 logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
                 raise
             except IOError as e:
-                logger.warning("While preparing output collection: %s", e)
+                logger.error("While preparing output collection: %s", e)
+                raise
 
         def rewrite(fileobj):
             fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
index 38135899dca7c66a10a135c85d2c0f8db43b0f2a..56c15a4a4344d6c467c0c7dba74b8ae5b126e280 100644 (file)
@@ -265,6 +265,13 @@ class ArvPathMapper(PathMapper):
 
 
 class StagingPathMapper(PathMapper):
+    # Note that StagingPathMapper internally maps files from target to source.
+    # Specifically, the 'self._pathmap' dict keys are the target location and the
+    # values are 'MapperEnt' named tuples from which we use the 'resolved' attribute
+    # as the file identifier. This makes it possible to map an input file to multiple
+    # target directories. The exception is for file literals, which store the contents of
+    # the file in 'MapperEnt.resolved' and are therefore still mapped from source to target.
+
     _follow_dirs = True
 
     def __init__(self, referenced_files, basedir, stagedir, separateDirs=True):
@@ -276,31 +283,51 @@ class StagingPathMapper(PathMapper):
         loc = obj["location"]
         tgt = os.path.join(stagedir, obj["basename"])
         basetgt, baseext = os.path.splitext(tgt)
+
+        def targetExists():
+            return tgt in self.targets and ("contents" not in obj) and (self._pathmap[tgt].resolved != loc)
+        def literalTargetExists():
+            return tgt in self.targets and "contents" in obj
+
         n = 1
-        if tgt in self.targets and (self.reversemap(tgt)[0] != loc):
+        if targetExists() or literalTargetExists():
             while tgt in self.targets:
                 n += 1
                 tgt = "%s_%i%s" % (basetgt, n, baseext)
         self.targets.add(tgt)
         if obj["class"] == "Directory":
             if obj.get("writable"):
-                self._pathmap[loc] = MapperEnt(loc, tgt, "WritableDirectory", staged)
+                self._pathmap[tgt] = MapperEnt(loc, tgt, "WritableDirectory", staged)
             else:
-                self._pathmap[loc] = MapperEnt(loc, tgt, "Directory", staged)
+                self._pathmap[tgt] = MapperEnt(loc, tgt, "Directory", staged)
             if loc.startswith("_:") or self._follow_dirs:
                 self.visitlisting(obj.get("listing", []), tgt, basedir)
         elif obj["class"] == "File":
-            if loc in self._pathmap:
+            if tgt in self._pathmap:
                 return
             if "contents" in obj and loc.startswith("_:"):
                 self._pathmap[loc] = MapperEnt(obj["contents"], tgt, "CreateFile", staged)
             else:
                 if copy or obj.get("writable"):
-                    self._pathmap[loc] = MapperEnt(loc, tgt, "WritableFile", staged)
+                    self._pathmap[tgt] = MapperEnt(loc, tgt, "WritableFile", staged)
                 else:
-                    self._pathmap[loc] = MapperEnt(loc, tgt, "File", staged)
+                    self._pathmap[tgt] = MapperEnt(loc, tgt, "File", staged)
                 self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir)
 
+    def mapper(self, src):  # type: (Text) -> MapperEnt
+        # Overridden to maintain the use case of mapping by source (identifier) to
+        # target regardless of how the map is structured internally.
+        def getMapperEnt(src):
+            for k,v in viewitems(self._pathmap):
+                if (v.type != "CreateFile" and v.resolved == src) or (v.type == "CreateFile" and k == src):
+                    return v
+
+        if u"#" in src:
+            i = src.index(u"#")
+            v = getMapperEnt(src[i:])
+            return MapperEnt(v.resolved, v.target + src[i:], v.type, v.staged)
+        return getMapperEnt(src)
+
 
 class VwdPathMapper(StagingPathMapper):
     def setup(self, referenced_files, basedir):
index 562d1765daa24254cf4ad1a33b03af1129f57eff..127b3f372bed10615ce6fa701740201e649b1b6b 100644 (file)
@@ -81,3 +81,94 @@ class TestMakeOutput(unittest.TestCase):
         self.api.links().create.assert_has_calls([mock.call(body={"head_uuid": final_uuid, "link_class": "tag", "name": "tag0"}), mock.call().execute(num_retries=num_retries)])
         self.api.links().create.assert_has_calls([mock.call(body={"head_uuid": final_uuid, "link_class": "tag", "name": "tag1"}), mock.call().execute(num_retries=num_retries)])
         self.api.links().create.assert_has_calls([mock.call(body={"head_uuid": final_uuid, "link_class": "tag", "name": "tag2"}), mock.call().execute(num_retries=num_retries)])
+
+    @mock.patch("arvados.collection.Collection")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_make_output_for_multiple_file_targets(self, reader, col):
+        keep_client = mock.MagicMock()
+        runner = arvados_cwl.executor.ArvCwlExecutor(self.api, keep_client=keep_client)
+        runner.project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        final = mock.MagicMock()
+        col.return_value = final
+        readermock = mock.MagicMock()
+        reader.return_value = readermock
+
+        # This output describes a single file listed in 2 different directories
+        _, runner.final_output_collection = runner.make_output_collection("Test output", ["foo"], "", { 'out': [
+        {
+            'basename': 'testdir1',
+            'listing': [
+                {
+                    'basename': 'test.txt',
+                    'nameroot': 'test',
+                    'nameext': '.txt',
+                    'location': 'keep:99999999999999999999999999999991+99/test.txt',
+                    'class': 'File',
+                    'size': 16
+                }
+            ],
+            'location': '_:99999999999999999999999999999992+99',
+            'class': 'Directory'
+        },
+        {
+            'basename': 'testdir2',
+            'listing': [
+                {
+                    'basename': 'test.txt',
+                    'nameroot': 'test',
+                    'nameext': '.txt',
+                    'location': 'keep:99999999999999999999999999999991+99/test.txt',
+                    'class':
+                    'File',
+                    'size': 16
+                }
+            ],
+            'location': '_:99999999999999999999999999999993+99',
+            'class': 'Directory'
+        }]})
+
+        # Check that copy is called on the collection for both locations
+        final.copy.assert_any_call("test.txt", "testdir1/test.txt", source_collection=mock.ANY, overwrite=mock.ANY)
+        final.copy.assert_any_call("test.txt", "testdir2/test.txt", source_collection=mock.ANY, overwrite=mock.ANY)
+
+    @mock.patch("arvados.collection.Collection")
+    @mock.patch("arvados.collection.CollectionReader")
+    def test_make_output_for_literal_name_conflicts(self, reader, col):
+        keep_client = mock.MagicMock()
+        runner = arvados_cwl.executor.ArvCwlExecutor(self.api, keep_client=keep_client)
+        runner.project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'
+
+        final = mock.MagicMock()
+        col.return_value = final
+        readermock = mock.MagicMock()
+        reader.return_value = readermock
+
+        # This output describes two literals with the same basename
+        _, runner.final_output_collection = runner.make_output_collection("Test output", ["foo"], "",  [
+        {
+            'lit':
+            {
+                'basename': 'a_file',
+                'nameext': '',
+                'nameroot': 'a_file',
+                'location': '_:f168fc0c-4291-40aa-a04e-366d57390560',
+                'class': 'File',
+                'contents': 'Hello file literal.'
+            }
+        },
+        {
+            'lit':
+            {
+                'basename': 'a_file',
+                'nameext': '',
+                'nameroot': 'a_file',
+                'location': '_:1728da8f-c64e-4a3e-b2e2-1ee356be7bc8',
+                'class': 'File',
+                'contents': 'Hello file literal.'
+            }
+        }])
+
+        # Check that the file name conflict is resolved and open is called for both
+        final.open.assert_any_call("a_file", "wb")
+        final.open.assert_any_call("a_file_2", "wb")
\ No newline at end of file
diff --git a/sdk/java-v2/.gitignore b/sdk/java-v2/.gitignore
new file mode 100644 (file)
index 0000000..c928081
--- /dev/null
@@ -0,0 +1,9 @@
+/.gradle/
+/bin/
+/build/
+.project
+.classpath
+/.settings/
+.DS_Store
+/.idea/
+/out/
diff --git a/sdk/java-v2/.licenseignore b/sdk/java-v2/.licenseignore
new file mode 100644 (file)
index 0000000..ecee9c7
--- /dev/null
@@ -0,0 +1,4 @@
+.licenseignore
+agpl-3.0.txt
+apache-2.0.txt
+COPYING
\ No newline at end of file
diff --git a/sdk/java-v2/COPYING b/sdk/java-v2/COPYING
new file mode 100644 (file)
index 0000000..27d8c81
--- /dev/null
@@ -0,0 +1,15 @@
+Unless indicated otherwise in the header of the file, the files in this
+repository are dual-licensed AGPL-3.0 and Apache-2.0
+
+Individual files contain an SPDX tag that indicates the license for the file.
+dual-licensed files use the following tag:
+
+    SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+
+This enables machine processing of license information based on the SPDX
+License Identifiers that are available here: http://spdx.org/licenses/
+
+The full license text for each license is available in this directory:
+
+  AGPL-3.0:     agpl-3.0.txt
+  Apache-2.0:   apache-2.0.txt
diff --git a/sdk/java-v2/README.md b/sdk/java-v2/README.md
new file mode 100644 (file)
index 0000000..ca5aef9
--- /dev/null
@@ -0,0 +1,115 @@
+```
+Copyright (C) The Arvados Authors. All rights reserved.
+SPDX-License-Identifier: CC-BY-SA-3.0
+```
+
+# Arvados Java SDK
+
+##### About
+The Arvados Java Client provides access to Arvados servers and uses two APIs:
+* lower level [Keep Server API](https://doc.arvados.org/api/index.html)
+* higher level [Keep-Web API](https://godoc.org/github.com/curoverse/arvados/services/keep-web) (when needed)
+
+##### Required Java version
+This SDK requires Java 8+
+
+##### Logging
+
+SLF4J is used for logging. Concrete logging framework and configuration must be provided by a client.
+
+##### Configuration
+
+[TypeSafe Configuration](https://github.com/lightbend/config) is used for configuring this library.
+
+Please, have a look at java/resources/reference.conf for default values provided with this library.
+
+* **keepweb-host** - change to host of your Keep-Web installation
+* **keepweb-port** - change to port of your Keep-Web installation
+* **host** - change to host of your Arvados installation
+* **port** - change to port of your Arvados installation
+* **token** - authenticates registered user, one must provide
+  [token obtained from Arvados Workbench](https://doc.arvados.org/user/reference/api-tokens.html)
+* **protocol** - don't change unless really needed
+* **host-insecure** - insecure communication with Arvados (ignores SSL certificate verification), 
+  don't change to *true* unless really needed
+* **split-size** - size of chunk files in megabytes
+* **temp-dir** - temporary chunk files storage
+* **copies** - amount of chunk files duplicates per Keep server
+* **retries** - in case of chunk file send failure this should allow the send to be retried
+  (*NOTE*: this parameter is not used at the moment but was left for future improvements)
+
+In order to override default settings one can create application.conf file in an application.
+Example: src/test/resources/application.conf.
+
+Alternatively ExternalConfigProvider class can be used to pass configuration via code. 
+ExternalConfigProvider comes with a builder and all of the above values must be provided in order for it to work properly.
+
+ArvadosFacade has two constructors, one without arguments that uses values from reference.conf and second one 
+taking ExternalConfigProvider as an argument.
+
+##### API clients
+
+All API clients inherit from BaseStandardApiClient. This class contains implementation of all 
+common methods as described in http://doc.arvados.org/api/methods.html.
+
+Parameters provided to common or specific methods are String UUID or fields wrapped in Java objects. For example:
+
+```java
+String uuid = "ardev-4zz18-rxcql7qwyakg1r1";
+
+Collection actual = client.get(uuid);
+```
+
+```java
+ListArgument listArgument = ListArgument.builder()
+        .filters(Arrays.asList(
+                Filter.of("owner_uuid", Operator.LIKE, "ardev%"),
+                Filter.of("name", Operator.LIKE, "Super%"),
+                Filter.of("portable_data_hash", Operator.IN, Lists.newArrayList("54f6d9f59065d3c009d4306660989379+65")
+            )))
+        .build();
+
+CollectionList actual = client.list(listArgument);
+```
+
+Non-standard API clients must inherit from BaseApiClient. 
+For example: KeepServerApiClient communicates directly with Keep servers using exclusively non-common methods.
+
+##### Business logic
+
+More advanced API data handling could be implemented as *Facade* classes. 
+In current version functionalities provided by SDK are handled by *ArvadosFacade*.
+They include:
+* **downloading single file from collection** - using Keep-Web
+* **downloading whole collection** - using Keep-Web or Keep Server API
+* **listing file info from certain collection** - information is returned as list of *FileTokens* providing file details
+* **uploading single file** - to either new or existing collection
+* **uploading list of files** - to either new or existing collection
+* **creating an empty collection**
+* **getting current user info**
+* **listing current user's collections**
+* **creating new project**
+* **deleting certain collection**
+
+##### Note regarding Keep-Web
+
+Current version requires both Keep Web and standard Keep Server API configured in order to use Keep-Web functionalities.
+
+##### Integration tests
+
+In order to run integration tests all fields within following configuration file must be provided: 
+```java
+src/test/resources/integration-test-appliation.conf 
+```
+Parameter **integration-tests.project-uuid** should contain the UUID of a project available to the user
+whose token was provided in the configuration file.
+
+Integration tests require connection to real Arvados server.
+
+##### Note regarding file naming
+
+While uploading via this SDK, all uploaded files within a single collection must have different names.
+This applies also to uploading files to already existing collection. 
+Renaming files with duplicate names is not implemented in current version.
+
diff --git a/sdk/java-v2/agpl-3.0.txt b/sdk/java-v2/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/sdk/java-v2/apache-2.0.txt b/sdk/java-v2/apache-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/sdk/java-v2/build.gradle b/sdk/java-v2/build.gradle
new file mode 100644 (file)
index 0000000..7de7a11
--- /dev/null
@@ -0,0 +1,113 @@
+apply plugin: 'java-library'
+apply plugin: 'eclipse'
+apply plugin: 'idea'
+apply plugin: 'maven'
+apply plugin: 'signing'
+
+
+repositories {
+    mavenCentral()
+}
+
+dependencies {
+    api 'com.squareup.okhttp3:okhttp:3.9.1'
+    api 'com.fasterxml.jackson.core:jackson-databind:2.9.2'
+    api 'com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.9.2'
+    api 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.9.2'
+    api 'commons-codec:commons-codec:1.11'
+    api 'commons-io:commons-io:2.6'
+    api 'com.google.guava:guava:23.4-jre'
+    api 'org.slf4j:slf4j-api:1.7.25'
+    api 'com.typesafe:config:1.3.2'
+    
+    testImplementation 'junit:junit:4.12'
+    testImplementation 'org.mockito:mockito-core:2.12.0'
+    testImplementation 'org.assertj:assertj-core:3.8.0'
+    testImplementation 'com.squareup.okhttp3:mockwebserver:3.9.1'
+}
+
+test {
+    useJUnit {
+        excludeCategories 'org.arvados.client.junit.categories.IntegrationTests'
+    }
+
+       testLogging {
+           events "passed", "skipped", "failed"
+           afterSuite { desc, result ->
+               if (!desc.parent) { // will match the outermost suite
+                   println "\n---- Test results ----"
+                   println "${result.resultType} (${result.testCount} tests, ${result.successfulTestCount} successes, ${result.failedTestCount} failures, ${result.skippedTestCount} skipped)"
+                   println ""
+               }
+           }
+       }
+}
+
+task integrationTest(type: Test) {
+    useJUnit {
+        includeCategories 'org.arvados.client.junit.categories.IntegrationTests'
+    }
+}
+
+task javadocJar(type: Jar) {
+    classifier = 'javadoc'
+    from javadoc
+}
+
+task sourcesJar(type: Jar) {
+    classifier = 'sources'
+    from sourceSets.main.allSource
+}
+
+artifacts {
+    archives javadocJar, sourcesJar
+}
+
+signing {
+    sign configurations.archives
+}
+
+uploadArchives {
+  repositories {
+    mavenDeployer {
+      beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }
+
+      repository(url: "https://oss.sonatype.org/service/local/staging/deploy/maven2") {
+        authentication(userName: ossrhUsername, password: ossrhPassword)
+      }
+
+      snapshotRepository(url: "https://oss.sonatype.org/content/repositories/snapshots") {
+        authentication(userName: ossrhUsername, password: ossrhPassword)
+      }
+
+      pom.project {
+        name 'Arvados Java SDK'
+        packaging 'jar'
+        groupId 'org.arvados'
+        description 'Arvados Java SDK'
+        url 'https://github.com/curoverse/arvados'
+               
+       scm {
+         url 'scm:git:https://github.com/curoverse/arvados.git'
+         connection 'scm:git:https://github.com/curoverse/arvados.git'
+         developerConnection 'scm:git:https://github.com/curoverse/arvados.git'
+       }
+
+        licenses {
+          license {
+            name 'The Apache License, Version 2.0'
+            url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
+          }
+        }
+
+        developers {
+          developer {
+            id 'veritasgenetics'
+            name 'Veritas Genetics'
+            email 'ops@veritasgenetics.com'
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/sdk/java-v2/gradle.properties b/sdk/java-v2/gradle.properties
new file mode 100644 (file)
index 0000000..1d087e4
--- /dev/null
@@ -0,0 +1,8 @@
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+#
+
+ossrhUsername = ''
+ossrhPassword = ''
diff --git a/sdk/java-v2/settings.gradle b/sdk/java-v2/settings.gradle
new file mode 100644 (file)
index 0000000..6bdaa30
--- /dev/null
@@ -0,0 +1 @@
+rootProject.name = 'arvados-java-sdk'
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
new file mode 100644 (file)
index 0000000..7e8a297
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.arvados.client.exception.ArvadosApiException;
+import org.arvados.client.api.client.factory.OkHttpClientFactory;
+import org.arvados.client.api.model.ApiError;
+import org.arvados.client.config.ConfigProvider;
+import okhttp3.OkHttpClient;
+import okhttp3.Request;
+import okhttp3.Response;
+import okhttp3.ResponseBody;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.nio.charset.StandardCharsets;
+import java.util.Objects;
+
+abstract class BaseApiClient {
+
+    static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules();
+
+    final OkHttpClient client;
+    final ConfigProvider config;
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseApiClient.class);
+
+    BaseApiClient(ConfigProvider config) {
+        this.config = config;
+        client = OkHttpClientFactory.builder()
+                .build()
+                .create(config.isApiHostInsecure());
+    }
+
+    Request.Builder getRequestBuilder() {
+        return new Request.Builder()
+                .addHeader("authorization", String.format("OAuth2 %s", config.getApiToken()))
+                .addHeader("cache-control", "no-cache");
+    }
+
+    String newCall(Request request) {
+        return (String) getResponseBody(request, body -> body.string().trim());
+    }
+
+    byte[] newFileCall(Request request) {
+        return (byte[]) getResponseBody(request, ResponseBody::bytes);
+    }
+
+    private Object getResponseBody(Request request, Command command) {
+        try {
+            log.debug(URLDecoder.decode(request.toString(), StandardCharsets.UTF_8.name()));
+        } catch (UnsupportedEncodingException e) {
+            throw new ArvadosApiException(e);
+        }
+
+        try (Response response = client.newCall(request).execute()) {
+            ResponseBody responseBody = response.body();
+
+            if (!response.isSuccessful()) {
+                String errorBody = Objects.requireNonNull(responseBody).string();
+                if (errorBody == null || errorBody.length() == 0) {
+                    throw new ArvadosApiException(String.format("Error code %s with message: %s", response.code(), response.message()));
+                }
+                ApiError apiError = MAPPER.readValue(errorBody, ApiError.class);
+                throw new ArvadosApiException(String.format("Error code %s with messages: %s", response.code(), apiError.getErrors()));
+            }
+            return command.readResponseBody(responseBody);
+        } catch (IOException e) {
+            throw new ArvadosApiException(e);
+        }
+    }
+
+    private interface Command {
+        Object readResponseBody(ResponseBody body) throws IOException;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
new file mode 100644 (file)
index 0000000..ab03d34
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import okhttp3.MediaType;
+import okhttp3.HttpUrl;
+import okhttp3.HttpUrl.Builder;
+import okhttp3.Request;
+import okhttp3.RequestBody;
+import org.arvados.client.exception.ArvadosApiException;
+import org.arvados.client.api.model.Item;
+import org.arvados.client.api.model.ItemList;
+import org.arvados.client.api.model.argument.ListArgument;
+import org.arvados.client.config.ConfigProvider;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.util.Map;
+
+public abstract class BaseStandardApiClient<T extends Item, L extends ItemList> extends BaseApiClient {
+
+    private static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseStandardApiClient.class);
+
+    BaseStandardApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    public L list(ListArgument listArguments) {
+        log.debug("Get list of {}", getType().getSimpleName());
+        Builder urlBuilder = getUrlBuilder();
+        addQueryParameters(urlBuilder, listArguments);
+        HttpUrl url = urlBuilder.build();
+        Request request = getRequestBuilder().url(url).build();
+        return callForList(request);
+    }
+    
+    public L list() {
+        return list(ListArgument.builder().build());
+    }
+
+    public T get(String uuid) {
+        log.debug("Get {} by UUID {}", getType().getSimpleName(), uuid);
+        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();
+        Request request = getRequestBuilder().get().url(url).build();
+        return callForType(request);
+    }
+
+    public T create(T type) {
+        log.debug("Create {}", getType().getSimpleName());
+        String json = mapToJson(type);
+        RequestBody body = RequestBody.create(JSON, json);
+        Request request = getRequestBuilder().post(body).build();
+        return callForType(request);
+    }
+
+    public T delete(String uuid) {
+        log.debug("Delete {} by UUID {}", getType().getSimpleName(), uuid);
+        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();
+        Request request = getRequestBuilder().delete().url(url).build();
+        return callForType(request);
+    }
+
+    public T update(T type) {
+        String uuid = type.getUuid();
+        log.debug("Update {} by UUID {}", getType().getSimpleName(), uuid);
+        String json = mapToJson(type);
+        RequestBody body = RequestBody.create(JSON, json);
+        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();
+        Request request = getRequestBuilder().put(body).url(url).build();
+        return callForType(request);
+    }
+
+    @Override
+    Request.Builder getRequestBuilder() {
+        return super.getRequestBuilder().url(getUrlBuilder().build());
+    }
+
+    HttpUrl.Builder getUrlBuilder() {
+        return new HttpUrl.Builder()
+                .scheme(config.getApiProtocol())
+                .host(config.getApiHost())
+                .port(config.getApiPort())
+                .addPathSegment("arvados")
+                .addPathSegment("v1")
+                .addPathSegment(getResource());
+    }
+
+    <TL> TL call(Request request, Class<TL> cls) {
+        String bodyAsString = newCall(request);
+        try {
+            return mapToObject(bodyAsString, cls);
+        } catch (IOException e) {
+            throw new ArvadosApiException("A problem occurred while parsing JSON data", e);
+        }
+    }
+
+    private <TL> TL mapToObject(String content, Class<TL> cls) throws IOException {
+        return MAPPER.readValue(content, cls);
+    }
+
+    private <TL> String mapToJson(TL type) {
+        ObjectWriter writer = MAPPER.writer().withDefaultPrettyPrinter();
+        try {
+            return writer.writeValueAsString(type);
+        } catch (JsonProcessingException e) {
+            log.error(e.getMessage());
+            return null;
+        }
+    }
+
+    T callForType(Request request) {
+        return call(request, getType());
+    }
+
+    L callForList(Request request) {
+        return call(request, getListType());
+    }
+
+    abstract String getResource();
+
+    abstract Class<T> getType();
+
+    abstract Class<L> getListType();
+    
+    Request getNoArgumentMethodRequest(String method) {
+        HttpUrl url = getUrlBuilder().addPathSegment(method).build();
+        return getRequestBuilder().get().url(url).build();
+    }
+    
+    RequestBody getJsonRequestBody(Object object) {
+        return RequestBody.create(JSON, mapToJson(object));
+    }
+    
+    void addQueryParameters(Builder urlBuilder, Object object) {
+        Map<String, Object> queryMap = MAPPER.convertValue(object, new TypeReference<Map<String, Object>>() {});
+        queryMap.keySet().forEach(key -> {
+            Object type = queryMap.get(key);
+            if (!(type instanceof String)) {
+                type = mapToJson(type);
+            }
+            urlBuilder.addQueryParameter(key, (String) type);
+        });
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
new file mode 100644 (file)
index 0000000..141f02d
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.config.ConfigProvider;
+import org.slf4j.Logger;
+
+public class CollectionsApiClient extends BaseStandardApiClient<Collection, CollectionList> {
+
+    private static final String RESOURCE = "collections";
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(CollectionsApiClient.class);
+
+    public CollectionsApiClient(ConfigProvider config) {
+        super(config);
+    }
+    
+    @Override
+    public Collection create(Collection type) {
+        Collection newCollection = super.create(type);
+        log.debug(String.format("New collection '%s' with UUID %s has been created", newCollection.getName(), newCollection.getUuid()));
+        return newCollection;
+    }
+
+    @Override
+    String getResource() {
+        return RESOURCE;
+    }
+
+    @Override
+    Class<Collection> getType() {
+        return Collection.class;
+    }
+
+    @Override
+    Class<CollectionList> getListType() {
+        return CollectionList.class;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingFileRequestBody.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CountingFileRequestBody.java
new file mode 100644 (file)
index 0000000..43fcdba
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.MediaType;
+import okhttp3.RequestBody;
+import okio.BufferedSink;
+import okio.Okio;
+import okio.Source;
+import org.slf4j.Logger;
+
+import java.io.File;
+
+/**
+ * Based on:
+ * {@link} https://gist.github.com/eduardb/dd2dc530afd37108e1ac
+ */
+public class CountingFileRequestBody extends RequestBody {
+
+    private static final int SEGMENT_SIZE = 2048; // okio.Segment.SIZE
+    private static final MediaType CONTENT_BINARY = MediaType.parse(com.google.common.net.MediaType.OCTET_STREAM.toString());
+
+    private final File file;
+    private final ProgressListener listener;
+
+    CountingFileRequestBody(final File file, final ProgressListener listener) {
+        this.file = file;
+        this.listener = listener;
+    }
+
+    @Override
+    public long contentLength() {
+        return file.length();
+    }
+
+    @Override
+    public MediaType contentType() {
+        return CONTENT_BINARY;
+    }
+
+    @Override
+    public void writeTo(BufferedSink sink) {
+        try (Source source = Okio.source(file)) {
+            long total = 0;
+            long read;
+
+            while ((read = source.read(sink.buffer(), SEGMENT_SIZE)) != -1) {
+                total += read;
+                sink.flush();
+                listener.updateProgress(total);
+
+            }
+        } catch (RuntimeException rethrown) {
+            throw rethrown;
+        } catch (Exception ignored) {
+            //ignore
+        }
+    }
+
+    static class TransferData {
+
+        private final Logger log = org.slf4j.LoggerFactory.getLogger(TransferData.class);
+        private int progressValue;
+        private long totalSize;
+
+        TransferData(long totalSize) {
+            this.progressValue = 0;
+            this.totalSize = totalSize;
+        }
+
+        void updateTransferProgress(long transferred) {
+            float progress = (transferred / (float) totalSize) * 100;
+            if (progressValue != (int) progress) {
+                progressValue = (int) progress;
+                log.debug("{} / {} / {}%", transferred, totalSize, progressValue);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java
new file mode 100644 (file)
index 0000000..75aa9ca
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.HttpUrl;
+import okhttp3.HttpUrl.Builder;
+import okhttp3.Request;
+import okhttp3.RequestBody;
+import org.arvados.client.api.model.Group;
+import org.arvados.client.api.model.GroupList;
+import org.arvados.client.api.model.argument.ContentsGroup;
+import org.arvados.client.api.model.argument.UntrashGroup;
+import org.arvados.client.config.ConfigProvider;
+import org.slf4j.Logger;
+
+public class GroupsApiClient extends BaseStandardApiClient<Group, GroupList> {
+
+    private static final String RESOURCE = "groups";
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(GroupsApiClient.class);
+
+    public GroupsApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    public GroupList contents(ContentsGroup contentsGroup) {
+        log.debug("Get {} contents", getType().getSimpleName());
+        Builder urlBuilder = getUrlBuilder().addPathSegment("contents");
+        addQueryParameters(urlBuilder, contentsGroup);
+        HttpUrl url = urlBuilder.build();
+        Request request = getRequestBuilder().url(url).build();
+        return callForList(request);
+    }
+
+    public Group untrash(UntrashGroup untrashGroup) {
+        log.debug("Untrash {} by UUID {}", getType().getSimpleName(), untrashGroup.getUuid());
+        HttpUrl url = getUrlBuilder().addPathSegment(untrashGroup.getUuid()).addPathSegment("untrash").build();
+        RequestBody requestBody = getJsonRequestBody(untrashGroup);
+        Request request = getRequestBuilder().post(requestBody).url(url).build();
+        return callForType(request);
+    }
+
+    @Override
+    String getResource() {
+        return RESOURCE;
+    }
+
+    @Override
+    Class<Group> getType() {
+        return Group.class;
+    }
+
+    @Override
+    Class<GroupList> getListType() {
+        return GroupList.class;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServerApiClient.java
new file mode 100644 (file)
index 0000000..a9306ca
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.Request;
+import okhttp3.RequestBody;
+import org.arvados.client.api.client.CountingFileRequestBody.TransferData;
+import org.arvados.client.common.Headers;
+import org.arvados.client.config.ConfigProvider;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.util.Map;
+
+public class KeepServerApiClient extends BaseApiClient {
+
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(KeepServerApiClient.class);
+
+    public KeepServerApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    public String upload(String url, Map<String, String> headers, File body) {
+
+        log.debug("Upload file {} to server location {}", body, url);
+
+        final TransferData transferData = new TransferData(body.length());
+
+        RequestBody requestBody =  new CountingFileRequestBody(body, transferData::updateTransferProgress);
+
+        Request request = getRequestBuilder()
+                .url(url)
+                .addHeader(Headers.X_KEEP_DESIRED_REPLICAS, headers.get(Headers.X_KEEP_DESIRED_REPLICAS))
+                .put(requestBody)
+                .build();
+
+        return newCall(request);
+    }
+
+    public byte[] download(String url) {
+
+        Request request = getRequestBuilder()
+                .url(url)
+                .get()
+                .build();
+
+        return newFileCall(request);
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServicesApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepServicesApiClient.java
new file mode 100644 (file)
index 0000000..81a9d6f
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import org.arvados.client.api.model.KeepService;
+import org.arvados.client.api.model.KeepServiceList;
+import org.arvados.client.config.ConfigProvider;
+import org.slf4j.Logger;
+
+public class KeepServicesApiClient extends BaseStandardApiClient<KeepService, KeepServiceList> { // REST client for the Arvados "keep_services" resource
+
+    private static final String RESOURCE = "keep_services";
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(KeepServicesApiClient.class);
+
+    public KeepServicesApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    public KeepServiceList accessible() { // calls the no-argument "accessible" endpoint and maps the response to a KeepServiceList
+        log.debug("Get list of accessible {}", getType().getSimpleName());
+        return callForList(getNoArgumentMethodRequest("accessible"));
+    }
+
+    @Override
+    String getResource() {
+        return RESOURCE;
+    }
+
+    @Override
+    Class<KeepService> getType() {
+        return KeepService.class;
+    }
+
+    @Override
+    Class<KeepServiceList> getListType() {
+        return KeepServiceList.class;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
new file mode 100644 (file)
index 0000000..4cd08b7
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.HttpUrl;
+import okhttp3.Request;
+import org.arvados.client.config.ConfigProvider;
+
+/**
+ * Client for keep-web, which serves collection contents over HTTP(S).
+ */
+public class KeepWebApiClient extends BaseApiClient {
+
+    public KeepWebApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    /**
+     * Downloads a single file from a collection via keep-web.
+     *
+     * @param collectionUuid uuid of the collection containing the file
+     * @param filePathName   path of the file within the collection; may contain '/'
+     * @return the file contents
+     */
+    public byte[] download(String collectionUuid, String filePathName) {
+        Request request = getRequestBuilder()
+                .url(getUrlBuilder(collectionUuid, filePathName).build())
+                .get()
+                .build();
+
+        return newFileCall(request);
+    }
+
+    // Builds <scheme>://<keep-web-host>:<port>/c=<uuid>/<file path>.
+    private HttpUrl.Builder getUrlBuilder(String collectionUuid, String filePathName) {
+        return new HttpUrl.Builder()
+                .scheme(config.getApiProtocol())
+                .host(config.getKeepWebHost())
+                .port(config.getKeepWebPort())
+                .addPathSegment("c=" + collectionUuid)
+                // addPathSegments() splits on '/', so files inside subdirectories
+                // become separate path segments instead of one percent-encoded segment.
+                .addPathSegments(filePathName);
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/ProgressListener.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/ProgressListener.java
new file mode 100644 (file)
index 0000000..8563adc
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+/**
+ * Callback notified as bytes are transferred, e.g. to report upload progress.
+ */
+@FunctionalInterface
+public interface ProgressListener {
+
+    /**
+     * @param num progress value reported by the producer (byte count --
+     *            see CountingFileRequestBody; TODO confirm unit)
+     */
+    void updateProgress(long num);
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/UsersApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/UsersApiClient.java
new file mode 100644 (file)
index 0000000..5bf1d07
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.Request;
+import org.arvados.client.api.model.User;
+import org.arvados.client.api.model.UserList;
+import org.arvados.client.config.ConfigProvider;
+import org.slf4j.Logger;
+
+public class UsersApiClient extends BaseStandardApiClient<User, UserList> { // REST client for the Arvados "users" resource
+
+    private static final String RESOURCE = "users";
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(UsersApiClient.class);
+
+    public UsersApiClient(ConfigProvider config) {
+        super(config);
+    }
+
+    public User current() { // calls the no-argument "current" endpoint
+        log.debug("Get current {}", getType().getSimpleName());
+        Request request = getNoArgumentMethodRequest("current");
+        return callForType(request);
+    }
+
+    public User system() { // calls the no-argument "system" endpoint
+        log.debug("Get system {}", getType().getSimpleName());
+        Request request = getNoArgumentMethodRequest("system");
+        return callForType(request);
+    }
+
+    @Override
+    String getResource() {
+        return RESOURCE;
+    }
+
+    @Override
+    Class<User> getType() {
+        return User.class;
+    }
+
+    @Override
+    Class<UserList> getListType() {
+        return UserList.class;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/factory/OkHttpClientFactory.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/factory/OkHttpClientFactory.java
new file mode 100644 (file)
index 0000000..0e95e66
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client.factory;
+
+import okhttp3.OkHttpClient;
+import org.arvados.client.exception.ArvadosClientException;
+import org.slf4j.Logger;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSocketFactory;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
+import java.security.KeyManagementException;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+import java.security.cert.X509Certificate;
+
+/**
+ * Creates {@link OkHttpClient} instances, optionally configured to accept
+ * any SSL certificate for insecure (e.g. development) API hosts.
+ */
+public class OkHttpClientFactory {
+
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(OkHttpClientFactory.class);
+
+    OkHttpClientFactory() {
+    }
+
+    public static OkHttpClientFactoryBuilder builder() {
+        return new OkHttpClientFactoryBuilder();
+    }
+
+    /**
+     * @param apiHostInsecure when true, the returned client skips all SSL
+     *                        certificate and hostname verification -- use only
+     *                        against trusted development servers
+     */
+    public OkHttpClient create(boolean apiHostInsecure) {
+        OkHttpClient.Builder builder = new OkHttpClient.Builder();
+        if (apiHostInsecure) {
+            trustAllCertificates(builder);
+        }
+        return builder.build();
+    }
+
+    private void trustAllCertificates(OkHttpClient.Builder builder) {
+        log.warn("Creating unsafe OkHttpClient. All SSL certificates will be accepted.");
+        try {
+            // Create a trust manager that does not validate certificate chains
+            final TrustManager[] trustAllCerts = new TrustManager[] { createX509TrustManager() };
+
+            // Use the modern "TLS" protocol name; "SSL" refers to the obsolete
+            // SSLv3 protocol, which is insecure and disabled on current JREs.
+            SSLContext sslContext = SSLContext.getInstance("TLS");
+            sslContext.init(null, trustAllCerts, new SecureRandom());
+            // Create an ssl socket factory with our all-trusting manager
+            final SSLSocketFactory sslSocketFactory = sslContext.getSocketFactory();
+
+            builder.sslSocketFactory(sslSocketFactory, (X509TrustManager) trustAllCerts[0]);
+            builder.hostnameVerifier((hostname, session) -> true);
+        } catch (NoSuchAlgorithmException | KeyManagementException e) {
+            throw new ArvadosClientException("Error establishing SSL context", e);
+        }
+    }
+
+    // Trust manager that accepts every client and server certificate chain.
+    private static X509TrustManager createX509TrustManager() {
+        return new X509TrustManager() {
+
+            @Override
+            public void checkClientTrusted(X509Certificate[] chain, String authType) {}
+
+            @Override
+            public void checkServerTrusted(X509Certificate[] chain, String authType) {}
+
+            @Override
+            public X509Certificate[] getAcceptedIssuers() {
+                return new X509Certificate[] {};
+            }
+        };
+    }
+
+    public static class OkHttpClientFactoryBuilder {
+        OkHttpClientFactoryBuilder() {
+        }
+
+        public OkHttpClientFactory build() {
+            return new OkHttpClientFactory();
+        }
+
+        public String toString() {
+            return "OkHttpClientFactory.OkHttpClientFactoryBuilder()";
+        }
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/ApiError.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/ApiError.java
new file mode 100644 (file)
index 0000000..1529f9c
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "errors", "error_token" })
+public class ApiError { // JSON error payload returned by the Arvados API: message list plus an error token
+
+    @JsonProperty("errors")
+    private List<String> errors;
+    @JsonProperty("error_token")
+    private String errorToken;
+
+    public List<String> getErrors() {
+        return this.errors;
+    }
+
+    public String getErrorToken() {
+        return this.errorToken;
+    }
+
+    public void setErrors(List<String> errors) {
+        this.errors = errors;
+    }
+
+    public void setErrorToken(String errorToken) {
+        this.errorToken = errorToken;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Collection.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Collection.java
new file mode 100644 (file)
index 0000000..b1652e2
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.time.LocalDateTime;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "portable_data_hash", "replication_desired", "replication_confirmed_at", "replication_confirmed", "manifest_text", 
+    "name", "description", "properties", "delete_at", "trash_at", "is_trashed" })
+public class Collection extends Item { // Arvados collection record; identity/audit fields (uuid, etag, timestamps) come from Item
+
+    @JsonProperty("portable_data_hash")
+    private String portableDataHash;
+    @JsonProperty("replication_desired")
+    private Integer replicationDesired;
+    @JsonProperty("replication_confirmed_at")
+    private LocalDateTime replicationConfirmedAt;
+    @JsonProperty("replication_confirmed")
+    private Integer replicationConfirmed;
+    @JsonProperty("manifest_text")
+    private String manifestText;
+    @JsonProperty("name")
+    private String name;
+    @JsonProperty("description")
+    private String description;
+    @JsonProperty("properties")
+    private Object properties; // arbitrary JSON; deliberately left untyped
+    @JsonProperty("delete_at")
+    private LocalDateTime deleteAt;
+    @JsonProperty("trash_at")
+    private LocalDateTime trashAt;
+    @JsonProperty("is_trashed")
+    private Boolean trashed; // JSON name "is_trashed" differs from the Java field name
+
+    public String getPortableDataHash() {
+        return this.portableDataHash;
+    }
+
+    public Integer getReplicationDesired() {
+        return this.replicationDesired;
+    }
+
+    public LocalDateTime getReplicationConfirmedAt() {
+        return this.replicationConfirmedAt;
+    }
+
+    public Integer getReplicationConfirmed() {
+        return this.replicationConfirmed;
+    }
+
+    public String getManifestText() {
+        return this.manifestText;
+    }
+
+    public String getName() {
+        return this.name;
+    }
+
+    public String getDescription() {
+        return this.description;
+    }
+
+    public Object getProperties() {
+        return this.properties;
+    }
+
+    public LocalDateTime getDeleteAt() {
+        return this.deleteAt;
+    }
+
+    public LocalDateTime getTrashAt() {
+        return this.trashAt;
+    }
+
+    public Boolean getTrashed() {
+        return this.trashed;
+    }
+
+    public void setPortableDataHash(String portableDataHash) {
+        this.portableDataHash = portableDataHash;
+    }
+
+    public void setReplicationDesired(Integer replicationDesired) {
+        this.replicationDesired = replicationDesired;
+    }
+
+    public void setReplicationConfirmedAt(LocalDateTime replicationConfirmedAt) {
+        this.replicationConfirmedAt = replicationConfirmedAt;
+    }
+
+    public void setReplicationConfirmed(Integer replicationConfirmed) {
+        this.replicationConfirmed = replicationConfirmed;
+    }
+
+    public void setManifestText(String manifestText) {
+        this.manifestText = manifestText;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    public void setProperties(Object properties) {
+        this.properties = properties;
+    }
+
+    public void setDeleteAt(LocalDateTime deleteAt) {
+        this.deleteAt = deleteAt;
+    }
+
+    public void setTrashAt(LocalDateTime trashAt) {
+        this.trashAt = trashAt;
+    }
+
+    public void setTrashed(Boolean trashed) {
+        this.trashed = trashed;
+    }
+
+    public String toString() {
+        return "Collection(portableDataHash=" + this.getPortableDataHash() + ", replicationDesired=" + this.getReplicationDesired() + ", replicationConfirmedAt=" + this.getReplicationConfirmedAt() + ", replicationConfirmed=" + this.getReplicationConfirmed() + ", manifestText=" + this.getManifestText() + ", name=" + this.getName() + ", description=" + this.getDescription() + ", properties=" + this.getProperties() + ", deleteAt=" + this.getDeleteAt() + ", trashAt=" + this.getTrashAt() + ", trashed=" + this.getTrashed() + ")";
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionList.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionList.java
new file mode 100644 (file)
index 0000000..4dae7f6
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "items" })
+public class CollectionList extends ItemList { // list-response wrapper; paging fields come from ItemList
+
+    @JsonProperty("items")
+    private List<Collection> items;
+
+    public List<Collection> getItems() {
+        return this.items;
+    }
+
+    public void setItems(List<Collection> items) {
+        this.items = items;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java
new file mode 100644 (file)
index 0000000..e9fbdb7
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.time.LocalDateTime;
+import java.util.List;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "command", "container_count", "container_count_max", "container_image", "container_uuid", "cwd", "environment", "expires_at", 
+    "filters", "log_uuid", "mounts", "output_name", "output_path", "output_uuid", "output_ttl", "priority", "properties", "requesting_container_uuid", 
+    "runtime_constraints", "scheduling_parameters", "state", "use_existing" }) // NOTE(review): this order lists container-request fields and omits the group-specific fields declared below (name, group_class, description, ...) -- looks copy-pasted; confirm intended
+public class Group extends Item { // Arvados group record; identity/audit fields come from Item
+
+    @JsonProperty("name")
+    private String name;
+    @JsonProperty("group_class")
+    private String groupClass;
+    @JsonProperty("description")
+    private String description;
+    @JsonProperty("writable_by")
+    private List<String> writableBy;
+    @JsonProperty("delete_at")
+    private LocalDateTime deleteAt;
+    @JsonProperty("trash_at")
+    private LocalDateTime trashAt;
+    @JsonProperty("is_trashed")
+    private Boolean isTrashed;
+    @JsonProperty("command")
+    private List<String> command; // NOTE(review): this and the fields below mirror the container-request model -- confirm they belong on Group
+    @JsonProperty("container_count")
+    private Integer containerCount;
+    @JsonProperty("container_count_max")
+    private Integer containerCountMax;
+    @JsonProperty("container_image")
+    private String containerImage;
+    @JsonProperty("container_uuid")
+    private String containerUuid;
+    @JsonProperty("cwd")
+    private String cwd;
+    @JsonProperty("environment")
+    private Object environment;
+    @JsonProperty("expires_at")
+    private LocalDateTime expiresAt;
+    @JsonProperty("filters")
+    private List<String> filters;
+    @JsonProperty("log_uuid")
+    private String logUuid;
+    @JsonProperty("mounts")
+    private Object mounts;
+    @JsonProperty("output_name")
+    private String outputName;
+    @JsonProperty("output_path")
+    private String outputPath;
+    @JsonProperty("output_uuid")
+    private String outputUuid;
+    @JsonProperty("output_ttl")
+    private Integer outputTtl;
+    @JsonProperty("priority")
+    private Integer priority;
+    @JsonProperty("properties")
+    private Object properties; // arbitrary JSON; deliberately left untyped
+    @JsonProperty("requesting_container_uuid")
+    private String requestingContainerUuid;
+    @JsonProperty("runtime_constraints")
+    private RuntimeConstraints runtimeConstraints;
+    @JsonProperty("scheduling_parameters")
+    private Object schedulingParameters;
+    @JsonProperty("state")
+    private String state;
+    @JsonProperty("use_existing")
+    private Boolean useExisting;
+
+    public String getName() {
+        return this.name;
+    }
+
+    public String getGroupClass() {
+        return this.groupClass;
+    }
+
+    public String getDescription() {
+        return this.description;
+    }
+
+    public List<String> getWritableBy() {
+        return this.writableBy;
+    }
+
+    public LocalDateTime getDeleteAt() {
+        return this.deleteAt;
+    }
+
+    public LocalDateTime getTrashAt() {
+        return this.trashAt;
+    }
+
+    public Boolean getIsTrashed() {
+        return this.isTrashed;
+    }
+
+    public List<String> getCommand() {
+        return this.command;
+    }
+
+    public Integer getContainerCount() {
+        return this.containerCount;
+    }
+
+    public Integer getContainerCountMax() {
+        return this.containerCountMax;
+    }
+
+    public String getContainerImage() {
+        return this.containerImage;
+    }
+
+    public String getContainerUuid() {
+        return this.containerUuid;
+    }
+
+    public String getCwd() {
+        return this.cwd;
+    }
+
+    public Object getEnvironment() {
+        return this.environment;
+    }
+
+    public LocalDateTime getExpiresAt() {
+        return this.expiresAt;
+    }
+
+    public List<String> getFilters() {
+        return this.filters;
+    }
+
+    public String getLogUuid() {
+        return this.logUuid;
+    }
+
+    public Object getMounts() {
+        return this.mounts;
+    }
+
+    public String getOutputName() {
+        return this.outputName;
+    }
+
+    public String getOutputPath() {
+        return this.outputPath;
+    }
+
+    public String getOutputUuid() {
+        return this.outputUuid;
+    }
+
+    public Integer getOutputTtl() {
+        return this.outputTtl;
+    }
+
+    public Integer getPriority() {
+        return this.priority;
+    }
+
+    public Object getProperties() {
+        return this.properties;
+    }
+
+    public String getRequestingContainerUuid() {
+        return this.requestingContainerUuid;
+    }
+
+    public RuntimeConstraints getRuntimeConstraints() {
+        return this.runtimeConstraints;
+    }
+
+    public Object getSchedulingParameters() {
+        return this.schedulingParameters;
+    }
+
+    public String getState() {
+        return this.state;
+    }
+
+    public Boolean getUseExisting() {
+        return this.useExisting;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setGroupClass(String groupClass) {
+        this.groupClass = groupClass;
+    }
+
+    public void setDescription(String description) {
+        this.description = description;
+    }
+
+    public void setWritableBy(List<String> writableBy) {
+        this.writableBy = writableBy;
+    }
+
+    public void setDeleteAt(LocalDateTime deleteAt) {
+        this.deleteAt = deleteAt;
+    }
+
+    public void setTrashAt(LocalDateTime trashAt) {
+        this.trashAt = trashAt;
+    }
+
+    public void setIsTrashed(Boolean isTrashed) {
+        this.isTrashed = isTrashed;
+    }
+
+    public void setCommand(List<String> command) {
+        this.command = command;
+    }
+
+    public void setContainerCount(Integer containerCount) {
+        this.containerCount = containerCount;
+    }
+
+    public void setContainerCountMax(Integer containerCountMax) {
+        this.containerCountMax = containerCountMax;
+    }
+
+    public void setContainerImage(String containerImage) {
+        this.containerImage = containerImage;
+    }
+
+    public void setContainerUuid(String containerUuid) {
+        this.containerUuid = containerUuid;
+    }
+
+    public void setCwd(String cwd) {
+        this.cwd = cwd;
+    }
+
+    public void setEnvironment(Object environment) {
+        this.environment = environment;
+    }
+
+    public void setExpiresAt(LocalDateTime expiresAt) {
+        this.expiresAt = expiresAt;
+    }
+
+    public void setFilters(List<String> filters) {
+        this.filters = filters;
+    }
+
+    public void setLogUuid(String logUuid) {
+        this.logUuid = logUuid;
+    }
+
+    public void setMounts(Object mounts) {
+        this.mounts = mounts;
+    }
+
+    public void setOutputName(String outputName) {
+        this.outputName = outputName;
+    }
+
+    public void setOutputPath(String outputPath) {
+        this.outputPath = outputPath;
+    }
+
+    public void setOutputUuid(String outputUuid) {
+        this.outputUuid = outputUuid;
+    }
+
+    public void setOutputTtl(Integer outputTtl) {
+        this.outputTtl = outputTtl;
+    }
+
+    public void setPriority(Integer priority) {
+        this.priority = priority;
+    }
+
+    public void setProperties(Object properties) {
+        this.properties = properties;
+    }
+
+    public void setRequestingContainerUuid(String requestingContainerUuid) {
+        this.requestingContainerUuid = requestingContainerUuid;
+    }
+
+    public void setRuntimeConstraints(RuntimeConstraints runtimeConstraints) {
+        this.runtimeConstraints = runtimeConstraints;
+    }
+
+    public void setSchedulingParameters(Object schedulingParameters) {
+        this.schedulingParameters = schedulingParameters;
+    }
+
+    public void setState(String state) {
+        this.state = state;
+    }
+
+    public void setUseExisting(Boolean useExisting) {
+        this.useExisting = useExisting;
+    }
+
+    public String toString() {
+        return "Group(name=" + this.getName() + ", groupClass=" + this.getGroupClass() + ", description=" + this.getDescription() + ", writableBy=" + this.getWritableBy() + ", deleteAt=" + this.getDeleteAt() + ", trashAt=" + this.getTrashAt() + ", isTrashed=" + this.getIsTrashed() + ", command=" + this.getCommand() + ", containerCount=" + this.getContainerCount() + ", containerCountMax=" + this.getContainerCountMax() + ", containerImage=" + this.getContainerImage() + ", containerUuid=" + this.getContainerUuid() + ", cwd=" + this.getCwd() + ", environment=" + this.getEnvironment() + ", expiresAt=" + this.getExpiresAt() + ", filters=" + this.getFilters() + ", logUuid=" + this.getLogUuid() + ", mounts=" + this.getMounts() + ", outputName=" + this.getOutputName() + ", outputPath=" + this.getOutputPath() + ", outputUuid=" + this.getOutputUuid() + ", outputTtl=" + this.getOutputTtl() + ", priority=" + this.getPriority() + ", properties=" + this.getProperties() + ", requestingContainerUuid=" + this.getRequestingContainerUuid() + ", runtimeConstraints=" + this.getRuntimeConstraints() + ", schedulingParameters=" + this.getSchedulingParameters() + ", state=" + this.getState() + ", useExisting=" + this.getUseExisting() + ")";
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/GroupList.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/GroupList.java
new file mode 100644 (file)
index 0000000..c78d8ff
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "items" })
+public class GroupList extends ItemList { // list-response wrapper; paging fields come from ItemList
+
+    @JsonProperty("items")
+    private List<Group> items;
+
+    public List<Group> getItems() {
+        return this.items;
+    }
+
+    public void setItems(List<Group> items) {
+        this.items = items;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Item.java
new file mode 100644 (file)
index 0000000..be30e57
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.time.LocalDateTime;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "href", "kind", "etag", "uuid", "owner_uuid", "created_at", "modified_by_client_uuid",
+        "modified_by_user_uuid", "modified_at", "updated_at" })
+public abstract class Item { // base class for Arvados API records: identity, ownership and audit-timestamp fields shared by all resources
+
+    @JsonProperty("href")
+    private String href;
+    @JsonProperty("kind")
+    private String kind;
+    @JsonProperty("etag")
+    private String etag;
+    @JsonProperty("uuid")
+    private String uuid;
+    @JsonProperty("owner_uuid")
+    private String ownerUuid;
+    @JsonProperty("created_at")
+    private LocalDateTime createdAt;
+    @JsonProperty("modified_by_client_uuid")
+    private String modifiedByClientUuid;
+    @JsonProperty("modified_by_user_uuid")
+    private String modifiedByUserUuid;
+    @JsonProperty("modified_at")
+    private LocalDateTime modifiedAt;
+    @JsonProperty("updated_at")
+    private LocalDateTime updatedAt;
+
+    public String getHref() {
+        return this.href;
+    }
+
+    public String getKind() {
+        return this.kind;
+    }
+
+    public String getEtag() {
+        return this.etag;
+    }
+
+    public String getUuid() {
+        return this.uuid;
+    }
+
+    public String getOwnerUuid() {
+        return this.ownerUuid;
+    }
+
+    public LocalDateTime getCreatedAt() {
+        return this.createdAt;
+    }
+
+    public String getModifiedByClientUuid() {
+        return this.modifiedByClientUuid;
+    }
+
+    public String getModifiedByUserUuid() {
+        return this.modifiedByUserUuid;
+    }
+
+    public LocalDateTime getModifiedAt() {
+        return this.modifiedAt;
+    }
+
+    public LocalDateTime getUpdatedAt() {
+        return this.updatedAt;
+    }
+
+    public void setHref(String href) {
+        this.href = href;
+    }
+
+    public void setKind(String kind) {
+        this.kind = kind;
+    }
+
+    public void setEtag(String etag) {
+        this.etag = etag;
+    }
+
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
+
+    public void setOwnerUuid(String ownerUuid) {
+        this.ownerUuid = ownerUuid;
+    }
+
+    public void setCreatedAt(LocalDateTime createdAt) {
+        this.createdAt = createdAt;
+    }
+
+    public void setModifiedByClientUuid(String modifiedByClientUuid) {
+        this.modifiedByClientUuid = modifiedByClientUuid;
+    }
+
+    public void setModifiedByUserUuid(String modifiedByUserUuid) {
+        this.modifiedByUserUuid = modifiedByUserUuid;
+    }
+
+    public void setModifiedAt(LocalDateTime modifiedAt) {
+        this.modifiedAt = modifiedAt;
+    }
+
+    public void setUpdatedAt(LocalDateTime updatedAt) {
+        this.updatedAt = updatedAt;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/ItemList.java
new file mode 100644 (file)
index 0000000..b15a362
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "kind", "etag", "self_link", "offset", "limit", "items_available" })
+public class ItemList { // base class for API list responses; "items" is declared by subclasses with a concrete element type
+
+    @JsonProperty("kind")
+    private String kind;
+    @JsonProperty("etag")
+    private String etag;
+    @JsonProperty("self_link")
+    private String selfLink;
+    @JsonProperty("offset")
+    private Object offset; // NOTE(review): Object rather than Integer -- confirm whether the API can return a non-numeric value here
+    @JsonProperty("limit")
+    private Object limit;
+    @JsonProperty("items_available")
+    private Integer itemsAvailable;
+
+    public String getKind() {
+        return this.kind;
+    }
+
+    public String getEtag() {
+        return this.etag;
+    }
+
+    public String getSelfLink() {
+        return this.selfLink;
+    }
+
+    public Object getOffset() {
+        return this.offset;
+    }
+
+    public Object getLimit() {
+        return this.limit;
+    }
+
+    public Integer getItemsAvailable() {
+        return this.itemsAvailable;
+    }
+
+    public void setKind(String kind) {
+        this.kind = kind;
+    }
+
+    public void setEtag(String etag) {
+        this.etag = etag;
+    }
+
+    public void setSelfLink(String selfLink) {
+        this.selfLink = selfLink;
+    }
+
+    public void setOffset(Object offset) {
+        this.offset = offset;
+    }
+
+    public void setLimit(Object limit) {
+        this.limit = limit;
+    }
+
+    public void setItemsAvailable(Integer itemsAvailable) {
+        this.itemsAvailable = itemsAvailable;
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepService.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepService.java
new file mode 100644 (file)
index 0000000..c29b44c
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.*;
+
+/**
+ * Model of a single Keep storage service record as returned by the
+ * keep_services API: host, port, SSL flag, service type and read-only flag.
+ * Extends {@code Item} for the common record fields (uuid, etag, ...).
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "service_host", "service_port", "service_ssl_flag", "service_type", "read_only" })
+public class KeepService extends Item {
+
+    @JsonProperty("service_host")
+    private String serviceHost;
+    @JsonProperty("service_port")
+    private Integer servicePort;
+    @JsonProperty("service_ssl_flag")
+    private Boolean serviceSslFlag;
+    @JsonProperty("service_type")
+    private String serviceType;
+    @JsonProperty("read_only")
+    private Boolean readOnly;
+    // Excluded from the JSON payload; presumably derived client-side from
+    // host/port/SSL flag when building request URLs — confirm with callers.
+    @JsonIgnore
+    private String serviceRoot;
+
+    public String getServiceHost() {
+        return this.serviceHost;
+    }
+
+    public Integer getServicePort() {
+        return this.servicePort;
+    }
+
+    public Boolean getServiceSslFlag() {
+        return this.serviceSslFlag;
+    }
+
+    public String getServiceType() {
+        return this.serviceType;
+    }
+
+    public Boolean getReadOnly() {
+        return this.readOnly;
+    }
+
+    public String getServiceRoot() {
+        return this.serviceRoot;
+    }
+
+    public void setServiceHost(String serviceHost) {
+        this.serviceHost = serviceHost;
+    }
+
+    public void setServicePort(Integer servicePort) {
+        this.servicePort = servicePort;
+    }
+
+    public void setServiceSslFlag(Boolean serviceSslFlag) {
+        this.serviceSslFlag = serviceSslFlag;
+    }
+
+    public void setServiceType(String serviceType) {
+        this.serviceType = serviceType;
+    }
+
+    public void setReadOnly(Boolean readOnly) {
+        this.readOnly = readOnly;
+    }
+
+    public void setServiceRoot(String serviceRoot) {
+        this.serviceRoot = serviceRoot;
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepServiceList.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/KeepServiceList.java
new file mode 100644 (file)
index 0000000..bbc09dc
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+/**
+ * List-response wrapper for the keep_services endpoint: pagination metadata
+ * comes from {@code ItemList}, this class adds the typed "items" collection.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "items" })
+public class KeepServiceList extends ItemList {
+
+    @JsonProperty("items")
+    private List<KeepService> items;
+
+    public List<KeepService> getItems() {
+        return this.items;
+    }
+
+    public void setItems(List<KeepService> items) {
+        this.items = items;
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/RuntimeConstraints.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/RuntimeConstraints.java
new file mode 100644 (file)
index 0000000..a23cd98
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+/**
+ * Container runtime_constraints sub-object: API access flag, vCPU count,
+ * RAM and keep cache RAM requests.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "API", "vcpus", "ram", "keep_cache_ram" })
+public class RuntimeConstraints {
+
+    // JSON key is deliberately upper-case "API" (server-defined), mapped to
+    // the conventionally-named Java field below.
+    @JsonProperty("API")
+    private Boolean api;
+    @JsonProperty("vcpus")
+    private Integer vcpus;
+    // Long-typed sizes; presumably bytes — confirm against the containers API doc.
+    @JsonProperty("ram")
+    private Long ram;
+    @JsonProperty("keep_cache_ram")
+    private Long keepCacheRam;
+
+    public Boolean getApi() {
+        return this.api;
+    }
+
+    public Integer getVcpus() {
+        return this.vcpus;
+    }
+
+    public Long getRam() {
+        return this.ram;
+    }
+
+    public Long getKeepCacheRam() {
+        return this.keepCacheRam;
+    }
+
+    public void setApi(Boolean api) {
+        this.api = api;
+    }
+
+    public void setVcpus(Integer vcpus) {
+        this.vcpus = vcpus;
+    }
+
+    public void setRam(Long ram) {
+        this.ram = ram;
+    }
+
+    public void setKeepCacheRam(Long keepCacheRam) {
+        this.keepCacheRam = keepCacheRam;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/User.java
new file mode 100644 (file)
index 0000000..5c86a07
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+/**
+ * Model of an Arvados user record (users API). Extends {@code Item} for the
+ * common record fields (uuid, etag, ...).
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "email", "username", "full_name", "first_name", "last_name", "identity_url", "is_active", "is_admin", "is_invited", 
+    "prefs", "writable_by", "default_owner_uuid" })
+public class User extends Item {
+
+    @JsonProperty("email")
+    private String email;
+    @JsonProperty("username")
+    private String username;
+    @JsonProperty("full_name")
+    private String fullName;
+    @JsonProperty("first_name")
+    private String firstName;
+    @JsonProperty("last_name")
+    private String lastName;
+    @JsonProperty("identity_url")
+    private String identityUrl;
+    @JsonProperty("is_active")
+    private Boolean isActive;
+    @JsonProperty("is_admin")
+    private Boolean isAdmin;
+    @JsonProperty("is_invited")
+    private Boolean isInvited;
+    // Free-form preferences hash; schema not fixed, hence Object.
+    @JsonProperty("prefs")
+    private Object prefs;
+    @JsonProperty("writable_by")
+    private List<String> writableBy;
+    // NOTE(review): declared Boolean, but the property name suggests a uuid
+    // String. Changing the type would break the getter/setter signatures, so
+    // verify the wire format before fixing.
+    @JsonProperty("default_owner_uuid")
+    private Boolean defaultOwnerUuid;
+
+    public String getEmail() {
+        return this.email;
+    }
+
+    public String getUsername() {
+        return this.username;
+    }
+
+    public String getFullName() {
+        return this.fullName;
+    }
+
+    public String getFirstName() {
+        return this.firstName;
+    }
+
+    public String getLastName() {
+        return this.lastName;
+    }
+
+    public String getIdentityUrl() {
+        return this.identityUrl;
+    }
+
+    public Boolean getIsActive() {
+        return this.isActive;
+    }
+
+    public Boolean getIsAdmin() {
+        return this.isAdmin;
+    }
+
+    public Boolean getIsInvited() {
+        return this.isInvited;
+    }
+
+    public Object getPrefs() {
+        return this.prefs;
+    }
+
+    public List<String> getWritableBy() {
+        return this.writableBy;
+    }
+
+    public Boolean getDefaultOwnerUuid() {
+        return this.defaultOwnerUuid;
+    }
+
+    public void setEmail(String email) {
+        this.email = email;
+    }
+
+    public void setUsername(String username) {
+        this.username = username;
+    }
+
+    public void setFullName(String fullName) {
+        this.fullName = fullName;
+    }
+
+    public void setFirstName(String firstName) {
+        this.firstName = firstName;
+    }
+
+    public void setLastName(String lastName) {
+        this.lastName = lastName;
+    }
+
+    public void setIdentityUrl(String identityUrl) {
+        this.identityUrl = identityUrl;
+    }
+
+    public void setIsActive(Boolean isActive) {
+        this.isActive = isActive;
+    }
+
+    public void setIsAdmin(Boolean isAdmin) {
+        this.isAdmin = isAdmin;
+    }
+
+    public void setIsInvited(Boolean isInvited) {
+        this.isInvited = isInvited;
+    }
+
+    public void setPrefs(Object prefs) {
+        this.prefs = prefs;
+    }
+
+    public void setWritableBy(List<String> writableBy) {
+        this.writableBy = writableBy;
+    }
+
+    public void setDefaultOwnerUuid(Boolean defaultOwnerUuid) {
+        this.defaultOwnerUuid = defaultOwnerUuid;
+    }
+
+    public String toString() {
+        return "User(email=" + this.getEmail() + ", username=" + this.getUsername() + ", fullName=" + this.getFullName() + ", firstName=" + this.getFirstName() + ", lastName=" + this.getLastName() + ", identityUrl=" + this.getIdentityUrl() + ", isActive=" + this.getIsActive() + ", isAdmin=" + this.getIsAdmin() + ", isInvited=" + this.getIsInvited() + ", prefs=" + this.getPrefs() + ", writableBy=" + this.getWritableBy() + ", defaultOwnerUuid=" + this.getDefaultOwnerUuid() + ")";
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/UserList.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/UserList.java
new file mode 100644 (file)
index 0000000..e148e72
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+/**
+ * List-response wrapper for the users endpoint: pagination metadata comes
+ * from {@code ItemList}, this class adds the typed "items" collection.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonPropertyOrder({ "items" })
+public class UserList extends ItemList {
+
+    @JsonProperty("items")
+    private List<User> items;
+
+    public List<User> getItems() {
+        return this.items;
+    }
+
+    public void setItems(List<User> items) {
+        this.items = items;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Argument.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Argument.java
new file mode 100644 (file)
index 0000000..6da4408
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model.argument;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+
+/**
+ * Base class for request-argument objects. Carries the target record's uuid
+ * for client-side use only — {@code @JsonIgnore} keeps it out of the
+ * serialized request body.
+ */
+public abstract class Argument {
+
+    @JsonIgnore
+    private String uuid;
+
+    public String getUuid() {
+        return this.uuid;
+    }
+
+    public void setUuid(String uuid) {
+        this.uuid = uuid;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ContentsGroup.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ContentsGroup.java
new file mode 100644 (file)
index 0000000..16febf7
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model.argument;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+/**
+ * Argument object for the groups "contents" call: optional limit, order,
+ * filters and recursive flag. Null fields are omitted from the request
+ * (Include.NON_NULL).
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonPropertyOrder({ "limit", "order", "filters", "recursive" })
+public class ContentsGroup extends Argument {
+
+    @JsonProperty("limit")
+    private Integer limit;
+
+    @JsonProperty("order")
+    private String order;
+
+    @JsonProperty("filters")
+    private List<String> filters;
+
+    @JsonProperty("recursive")
+    private Boolean recursive;
+
+    public Integer getLimit() {
+        return this.limit;
+    }
+
+    public String getOrder() {
+        return this.order;
+    }
+
+    public List<String> getFilters() {
+        return this.filters;
+    }
+
+    public Boolean getRecursive() {
+        return this.recursive;
+    }
+
+    public void setLimit(Integer limit) {
+        this.limit = limit;
+    }
+
+    public void setOrder(String order) {
+        this.order = order;
+    }
+
+    public void setFilters(List<String> filters) {
+        this.filters = filters;
+    }
+
+    public void setRecursive(Boolean recursive) {
+        this.recursive = recursive;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Filter.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/Filter.java
new file mode 100644 (file)
index 0000000..ae16dec
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model.argument;
+
+import com.fasterxml.jackson.annotation.JsonFormat;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+/**
+ * One Arvados query filter. {@code Shape.ARRAY} plus the property order make
+ * it serialize as the 3-element JSON array [attribute, operator, operand]
+ * that the API's "filters" parameter expects. Immutable: construct via
+ * {@link #of(String, Operator, Object)}.
+ */
+@JsonFormat(shape = JsonFormat.Shape.ARRAY)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonPropertyOrder({ "attribute", "operator", "operand" })
+public class Filter {
+
+    @JsonProperty("attribute")
+    private String attribute;
+
+    @JsonProperty("operator")
+    private Operator operator;
+
+    // Operand type varies per operator (string, number, list, ...), hence Object.
+    @JsonProperty("operand")
+    private Object operand;
+
+    // Private: callers must use the static factory below.
+    private Filter(String attribute, Operator operator, Object operand) {
+        this.attribute = attribute;
+        this.operator = operator;
+        this.operand = operand;
+    }
+
+    /** Static factory for a filter triple. */
+    public static Filter of(String attribute, Operator operator, Object operand) {
+        return new Filter(attribute, operator, operand);
+    }
+
+    public String getAttribute() {
+        return this.attribute;
+    }
+
+    public Operator getOperator() {
+        return this.operator;
+    }
+
+    public Object getOperand() {
+        return this.operand;
+    }
+
+    // Value equality over all three fields (Lombok-generated style).
+    public boolean equals(Object o) {
+        if (o == this) return true;
+        if (!(o instanceof Filter)) return false;
+        final Filter other = (Filter) o;
+        final Object this$attribute = this.getAttribute();
+        final Object other$attribute = other.getAttribute();
+        if (this$attribute == null ? other$attribute != null : !this$attribute.equals(other$attribute)) return false;
+        final Object this$operator = this.getOperator();
+        final Object other$operator = other.getOperator();
+        if (this$operator == null ? other$operator != null : !this$operator.equals(other$operator)) return false;
+        final Object this$operand = this.getOperand();
+        final Object other$operand = other.getOperand();
+        if (this$operand == null ? other$operand != null : !this$operand.equals(other$operand)) return false;
+        return true;
+    }
+
+    // Matches equals(): combines the same three fields.
+    public int hashCode() {
+        final int PRIME = 59;
+        int result = 1;
+        final Object $attribute = this.getAttribute();
+        result = result * PRIME + ($attribute == null ? 43 : $attribute.hashCode());
+        final Object $operator = this.getOperator();
+        result = result * PRIME + ($operator == null ? 43 : $operator.hashCode());
+        final Object $operand = this.getOperand();
+        result = result * PRIME + ($operand == null ? 43 : $operand.hashCode());
+        return result;
+    }
+
+    public String toString() {
+        return "Filter(attribute=" + this.getAttribute() + ", operator=" + this.getOperator() + ", operand=" + this.getOperand() + ")";
+    }
+
+    /**
+     * Filter operators; each serializes as the exact token the Arvados API
+     * expects (see the {@code @JsonProperty} values).
+     */
+    public enum Operator {
+
+        @JsonProperty("<")
+        LESS,
+
+        @JsonProperty("<=")
+        LESS_EQUALS,
+
+        @JsonProperty(">=")
+        MORE_EQUALS,
+
+        @JsonProperty(">")
+        MORE,
+
+        @JsonProperty("like")
+        LIKE,
+
+        @JsonProperty("ilike")
+        ILIKE,
+
+        @JsonProperty("=")
+        EQUALS,
+
+        @JsonProperty("!=")
+        NOT_EQUALS,
+
+        @JsonProperty("in")
+        IN,
+
+        @JsonProperty("not in")
+        NOT_IN,
+
+        @JsonProperty("is_a")
+        IS_A
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java
new file mode 100644 (file)
index 0000000..70231e6
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model.argument;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+import java.util.List;
+
+/**
+ * Argument object for "list" API calls: paging (limit/offset), filters,
+ * ordering, field selection, distinct flag and count mode. Null fields are
+ * omitted from the request (Include.NON_NULL). Build instances via
+ * {@link #builder()}.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonPropertyOrder({ "limit", "offset", "filters", "order", "select", "distinct", "count" })
+public class ListArgument extends Argument {
+
+    @JsonProperty("limit")
+    private Integer limit;
+
+    @JsonProperty("offset")
+    private Integer offset;
+    
+    @JsonProperty("filters")
+    private List<Filter> filters;
+
+    @JsonProperty("order")
+    private List<String> order;
+
+    @JsonProperty("select")
+    private List<String> select;
+
+    @JsonProperty("distinct")
+    private Boolean distinct;
+
+    @JsonProperty("count")
+    private Count count;
+
+
+    // Package-private: callers go through the builder.
+    ListArgument(Integer limit, Integer offset, List<Filter> filters, List<String> order, List<String> select, Boolean distinct, Count count) {
+        this.limit = limit;
+        this.offset = offset;
+        this.filters = filters;
+        this.order = order;
+        this.select = select;
+        this.distinct = distinct;
+        this.count = count;
+    }
+
+    public static ListArgumentBuilder builder() {
+        return new ListArgumentBuilder();
+    }
+
+    /** Count mode: "exact" returns items_available, "none" skips the count. */
+    public enum Count {
+        
+        @JsonProperty("exact")
+        EXACT,
+        
+        @JsonProperty("none")
+        NONE
+    }
+
+    /** Fluent builder for {@link ListArgument}; all fields optional. */
+    public static class ListArgumentBuilder {
+        private Integer limit;
+        private Integer offset;
+        private List<Filter> filters;
+        private List<String> order;
+        private List<String> select;
+        private Boolean distinct;
+        private Count count;
+
+        ListArgumentBuilder() {
+        }
+
+        public ListArgumentBuilder limit(Integer limit) {
+            this.limit = limit;
+            return this;
+        }
+
+        public ListArgumentBuilder offset(Integer offset) {
+            this.offset = offset;
+            return this;
+        }
+
+        public ListArgumentBuilder filters(List<Filter> filters) {
+            this.filters = filters;
+            return this;
+        }
+
+        public ListArgumentBuilder order(List<String> order) {
+            this.order = order;
+            return this;
+        }
+
+        public ListArgumentBuilder select(List<String> select) {
+            this.select = select;
+            return this;
+        }
+
+        public ListArgumentBuilder distinct(Boolean distinct) {
+            this.distinct = distinct;
+            return this;
+        }
+
+        public ListArgumentBuilder count(Count count) {
+            this.count = count;
+            return this;
+        }
+
+        public ListArgument build() {
+            return new ListArgument(limit, offset, filters, order, select, distinct, count);
+        }
+
+        public String toString() {
+            return "ListArgument.ListArgumentBuilder(limit=" + this.limit +
+                    ", offset=" + this.offset + ", filters=" + this.filters +
+                    ", order=" + this.order + ", select=" + this.select +
+                    ", distinct=" + this.distinct + ", count=" + this.count + ")";
+        }
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/UntrashGroup.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/argument/UntrashGroup.java
new file mode 100644 (file)
index 0000000..027dbf7
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model.argument;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+/**
+ * Argument object for the groups "untrash" call: the optional
+ * ensure_unique_name flag.
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonPropertyOrder({ "ensure_unique_name" })
+public class UntrashGroup extends Argument {
+
+    @JsonProperty("ensure_unique_name")
+    private Boolean ensureUniqueName;
+
+    public Boolean getEnsureUniqueName() {
+        return this.ensureUniqueName;
+    }
+
+    public void setEnsureUniqueName(Boolean ensureUniqueName) {
+        this.ensureUniqueName = ensureUniqueName;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/common/Characters.java b/sdk/java-v2/src/main/java/org/arvados/client/common/Characters.java
new file mode 100644 (file)
index 0000000..1e49a71
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.common;
+
+/**
+ * String constants used when building and parsing paths and manifest text.
+ * Non-instantiable holder class.
+ */
+public final class Characters {
+
+    private Characters() {}
+
+    // Literal backslash-octal token "\040" — presumably the escape used for
+    // spaces in Keep manifest text (not an actual space char); confirm against
+    // the manifest format spec.
+    public static final String SPACE = "\\040";
+    public static final String NEW_LINE = "\n";
+    public static final String SLASH = "/";
+    public static final String DOT = ".";
+    public static final String COLON = ":";
+    public static final String PERCENT = "%";
+    public static final String QUOTE = "\"";
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/common/Headers.java b/sdk/java-v2/src/main/java/org/arvados/client/common/Headers.java
new file mode 100644 (file)
index 0000000..4b43ed9
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.common;
+
+/**
+ * Custom HTTP header names used by the client. Non-instantiable holder class.
+ */
+public final class Headers {
+
+    private Headers() {}
+    
+    public static final String X_KEEP_DESIRED_REPLICAS = "X-Keep-Desired-Replicas";
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/common/Patterns.java b/sdk/java-v2/src/main/java/org/arvados/client/common/Patterns.java
new file mode 100644 (file)
index 0000000..c852cb0
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.common;
+
+/**
+ * Regex pattern strings shared by the client for validating hints, manifest
+ * file tokens, block locators, and uuids. Non-instantiable holder class.
+ */
+public final class Patterns {
+
+    public static final String HINT_PATTERN = "^[A-Z][A-Za-z0-9@_-]+$";
+    // Manifest file token: position:size:name.
+    public static final String FILE_TOKEN_PATTERN = "(\\d+:\\d+:\\S+)";
+    // Keep block locator: md5 hex digest + size, plus optional +X... hints.
+    public static final String LOCATOR_PATTERN = "([0-9a-f]{32})\\+([0-9]+)(\\+[A-Z][-A-Za-z0-9@_]*)*";
+    // Arvados uuids embed a type infix: j7d0g = group, tpzed = user.
+    public static final String GROUP_UUID_PATTERN = "[a-z0-9]{5}-j7d0g-[a-z0-9]{15}";
+    public static final String USER_UUID_PATTERN = "[a-z0-9]{5}-tpzed-[a-z0-9]{15}";
+
+    private Patterns() {}
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java b/sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java
new file mode 100644 (file)
index 0000000..c9a4109
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.config;
+
+import java.io.File;
+
+/**
+ * Source of client configuration: API connection settings plus file-upload
+ * tuning. Implemented by {@code FileConfigProvider} (typesafe-config file)
+ * and {@code ExternalConfigProvider} (programmatic builder).
+ */
+public interface ConfigProvider {
+
+    //API
+    /** Whether to skip TLS certificate verification for the API host. */
+    boolean isApiHostInsecure();
+
+    String getKeepWebHost();
+
+    int getKeepWebPort();
+
+    String getApiHost();
+
+    int getApiPort();
+
+    /** Arvados API auth token sent with every request. */
+    String getApiToken();
+
+    /** URL scheme for API requests (e.g. "https"). */
+    String getApiProtocol();
+
+
+    //FILE UPLOAD
+    /** Chunk size used when splitting files for upload. */
+    int getFileSplitSize();
+
+    /** Temporary directory holding file chunks during upload. */
+    File getFileSplitDirectory();
+
+    /** Desired number of replicas when writing to Keep. */
+    int getNumberOfCopies();
+
+    /** Retry attempts for failed operations. */
+    int getNumberOfRetries();
+
+
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java b/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java
new file mode 100644 (file)
index 0000000..17e0696
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.config;
+
+import java.io.File;
+
+/**
+ * {@link ConfigProvider} populated programmatically rather than from a config
+ * file. Build instances via {@link #builder()}.
+ */
+public class ExternalConfigProvider implements ConfigProvider {
+
+    private boolean apiHostInsecure;
+    private String keepWebHost;
+    private int keepWebPort;
+    private String apiHost;
+    private int apiPort;
+    private String apiToken;
+    private String apiProtocol;
+    private int fileSplitSize;
+    private File fileSplitDirectory;
+    private int numberOfCopies;
+    private int numberOfRetries;
+
+    // Package-private: callers go through the builder.
+    ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort, String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory, int numberOfCopies, int numberOfRetries) {
+        this.apiHostInsecure = apiHostInsecure;
+        this.keepWebHost = keepWebHost;
+        this.keepWebPort = keepWebPort;
+        this.apiHost = apiHost;
+        this.apiPort = apiPort;
+        this.apiToken = apiToken;
+        this.apiProtocol = apiProtocol;
+        this.fileSplitSize = fileSplitSize;
+        this.fileSplitDirectory = fileSplitDirectory;
+        this.numberOfCopies = numberOfCopies;
+        this.numberOfRetries = numberOfRetries;
+    }
+
+    public static ExternalConfigProviderBuilder builder() {
+        return new ExternalConfigProviderBuilder();
+    }
+
+    // NOTE(review): toString includes apiToken, so logging this object leaks
+    // the credential — consider masking.
+    @Override
+    public String toString() {
+        return "ExternalConfigProvider{" +
+                "apiHostInsecure=" + apiHostInsecure +
+                ", keepWebHost='" + keepWebHost + '\'' +
+                ", keepWebPort=" + keepWebPort +
+                ", apiHost='" + apiHost + '\'' +
+                ", apiPort=" + apiPort +
+                ", apiToken='" + apiToken + '\'' +
+                ", apiProtocol='" + apiProtocol + '\'' +
+                ", fileSplitSize=" + fileSplitSize +
+                ", fileSplitDirectory=" + fileSplitDirectory +
+                ", numberOfCopies=" + numberOfCopies +
+                ", numberOfRetries=" + numberOfRetries +
+                '}';
+    }
+
+    public boolean isApiHostInsecure() {
+        return this.apiHostInsecure;
+    }
+
+    public String getKeepWebHost() {
+        return this.keepWebHost;
+    }
+
+    public int getKeepWebPort() {
+        return this.keepWebPort;
+    }
+
+    public String getApiHost() {
+        return this.apiHost;
+    }
+
+    public int getApiPort() {
+        return this.apiPort;
+    }
+
+    public String getApiToken() {
+        return this.apiToken;
+    }
+
+    public String getApiProtocol() {
+        return this.apiProtocol;
+    }
+
+    public int getFileSplitSize() {
+        return this.fileSplitSize;
+    }
+
+    public File getFileSplitDirectory() {
+        return this.fileSplitDirectory;
+    }
+
+    public int getNumberOfCopies() {
+        return this.numberOfCopies;
+    }
+
+    public int getNumberOfRetries() {
+        return this.numberOfRetries;
+    }
+
+    /** Fluent builder for {@link ExternalConfigProvider}; all fields optional. */
+    public static class ExternalConfigProviderBuilder {
+        private boolean apiHostInsecure;
+        private String keepWebHost;
+        private int keepWebPort;
+        private String apiHost;
+        private int apiPort;
+        private String apiToken;
+        private String apiProtocol;
+        private int fileSplitSize;
+        private File fileSplitDirectory;
+        private int numberOfCopies;
+        private int numberOfRetries;
+
+        ExternalConfigProviderBuilder() {
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder apiHostInsecure(boolean apiHostInsecure) {
+            this.apiHostInsecure = apiHostInsecure;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder keepWebHost(String keepWebHost) {
+            this.keepWebHost = keepWebHost;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder keepWebPort(int keepWebPort) {
+            this.keepWebPort = keepWebPort;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder apiHost(String apiHost) {
+            this.apiHost = apiHost;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder apiPort(int apiPort) {
+            this.apiPort = apiPort;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder apiToken(String apiToken) {
+            this.apiToken = apiToken;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder apiProtocol(String apiProtocol) {
+            this.apiProtocol = apiProtocol;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder fileSplitSize(int fileSplitSize) {
+            this.fileSplitSize = fileSplitSize;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder fileSplitDirectory(File fileSplitDirectory) {
+            this.fileSplitDirectory = fileSplitDirectory;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder numberOfCopies(int numberOfCopies) {
+            this.numberOfCopies = numberOfCopies;
+            return this;
+        }
+
+        public ExternalConfigProvider.ExternalConfigProviderBuilder numberOfRetries(int numberOfRetries) {
+            this.numberOfRetries = numberOfRetries;
+            return this;
+        }
+
+        public ExternalConfigProvider build() {
+            return new ExternalConfigProvider(apiHostInsecure, keepWebHost, keepWebPort, apiHost, apiPort, apiToken, apiProtocol, fileSplitSize, fileSplitDirectory, numberOfCopies, numberOfRetries);
+        }
+
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java b/sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java
new file mode 100644 (file)
index 0000000..589c334
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.config;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+import java.io.File;
+
+/**
+ * {@link ConfigProvider} implementation backed by a Typesafe Config source
+ * (e.g. application.conf on the classpath). All settings are resolved
+ * relative to the "arvados" root section of the loaded configuration.
+ */
+public class FileConfigProvider implements ConfigProvider {
+
+    // Root section that every key below is resolved against.
+    private static final String DEFAULT_PATH = "arvados";
+    private final Config config;
+
+    // Loads the default configuration stack (system properties,
+    // application.conf, reference.conf).
+    public FileConfigProvider() {
+        config = ConfigFactory.load().getConfig(DEFAULT_PATH);
+    }
+
+    // Loads the named configuration resource, falling back to the default
+    // stack when configFile is null.
+    public FileConfigProvider(final String configFile) {
+        config = (configFile != null) ?
+                ConfigFactory.load(configFile).getConfig(DEFAULT_PATH) : ConfigFactory.load().getConfig(DEFAULT_PATH);
+    }
+
+    public Config getConfig() {
+        return config;
+    }
+
+    // Typed accessors. NOTE: Typesafe Config throws ConfigException.Missing
+    // (unchecked) when a requested key is absent from the configuration.
+    private File getFile(String path) {
+        return new File(config.getString(path));
+    }
+
+    private int getInt(String path) {
+        return config.getInt(path);
+    }
+
+    private boolean getBoolean(String path) {
+        return config.getBoolean(path);
+    }
+
+    private String getString(String path) {
+        return config.getString(path);
+    }
+
+    @Override
+    public boolean isApiHostInsecure() {
+        return this.getBoolean("api.host-insecure");
+    }
+
+    @Override
+    public String getKeepWebHost() {
+        return this.getString("api.keepweb-host");
+    }
+
+    @Override
+    public int getKeepWebPort() {
+        return this.getInt("api.keepweb-port");
+    }
+
+    @Override
+    public String getApiHost() {
+        return this.getString("api.host");
+    }
+
+    @Override
+    public int getApiPort() {
+        return this.getInt("api.port");
+    }
+
+    @Override
+    public String getApiToken() {
+        return this.getString("api.token");
+    }
+
+    @Override
+    public String getApiProtocol() {
+        return this.getString("api.protocol");
+    }
+
+    @Override
+    public int getFileSplitSize() {
+        return this.getInt("split-size");
+    }
+
+    @Override
+    public File getFileSplitDirectory() {
+        return this.getFile("temp-dir");
+    }
+
+    @Override
+    public int getNumberOfCopies() {
+        return this.getInt("copies");
+    }
+
+    @Override
+    public int getNumberOfRetries() {
+        return this.getInt("retries");
+    }
+
+    // Only used by integration tests; not part of the ConfigProvider contract.
+    public String getIntegrationTestProjectUuid() {
+        return this.getString("integration-tests.project-uuid");
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosApiException.java b/sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosApiException.java
new file mode 100644 (file)
index 0000000..51a9962
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.exception;
+
+/**
+ * Signals a failure while calling the Arvados API server.
+ * Unchecked, like all exceptions in this library (see ArvadosClientException).
+ */
+public class ArvadosApiException extends ArvadosClientException {
+
+    private static final long serialVersionUID = 1L;
+
+    public ArvadosApiException(String message) {
+        super(message);
+    }
+    
+    public ArvadosApiException(String message, Throwable cause) {
+        super(message, cause);
+    }
+    
+    public ArvadosApiException(Throwable cause) {
+        super(cause);
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosClientException.java b/sdk/java-v2/src/main/java/org/arvados/client/exception/ArvadosClientException.java
new file mode 100644 (file)
index 0000000..e93028d
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.exception;
+
+/**
+ * Parent exception for all exceptions in library.
+ * More specific exceptions like ArvadosApiException extend this class.
+ */
+public class ArvadosClientException extends RuntimeException {
+
+    public ArvadosClientException(String message) {
+        super(message);
+    }
+
+    public ArvadosClientException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public ArvadosClientException(Throwable cause) {
+        super(cause);
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java b/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
new file mode 100644 (file)
index 0000000..858edf5
--- /dev/null
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.facade;
+
+import com.google.common.collect.Lists;
+import org.arvados.client.api.client.CollectionsApiClient;
+import org.arvados.client.api.client.GroupsApiClient;
+import org.arvados.client.api.client.KeepWebApiClient;
+import org.arvados.client.api.client.UsersApiClient;
+import org.arvados.client.api.model.*;
+import org.arvados.client.api.model.argument.Filter;
+import org.arvados.client.api.model.argument.ListArgument;
+import org.arvados.client.config.FileConfigProvider;
+import org.arvados.client.config.ConfigProvider;
+import org.arvados.client.logic.collection.FileToken;
+import org.arvados.client.logic.collection.ManifestDecoder;
+import org.arvados.client.logic.keep.FileDownloader;
+import org.arvados.client.logic.keep.FileUploader;
+import org.arvados.client.logic.keep.KeepClient;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * High-level entry point for the Arvados Java SDK: wraps the lower-level API
+ * clients (collections, groups, users, Keep) behind simple upload/download/
+ * list/create/delete operations. Configuration comes either from an explicit
+ * {@link ConfigProvider} or, by default, from a {@link FileConfigProvider}.
+ */
+public class ArvadosFacade {
+
+    private final ConfigProvider config;
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(ArvadosFacade.class);
+    private CollectionsApiClient collectionsApiClient;
+    private GroupsApiClient groupsApiClient;
+    private UsersApiClient usersApiClient;
+    private FileDownloader fileDownloader;
+    private FileUploader fileUploader;
+    // Arvados "group_class" values that identify groups acting as projects.
+    private static final String PROJECT = "project";
+    private static final String SUBPROJECT = "sub-project";
+
+    public ArvadosFacade(ConfigProvider config) {
+        this.config = config;
+        setFacadeFields();
+    }
+
+    public ArvadosFacade() {
+        this.config = new FileConfigProvider();
+        setFacadeFields();
+    }
+
+    // Wires up all API clients and the upload/download helpers from the
+    // single ConfigProvider; called by both constructors.
+    private void setFacadeFields() {
+        collectionsApiClient = new CollectionsApiClient(config);
+        groupsApiClient = new GroupsApiClient(config);
+        usersApiClient = new UsersApiClient(config);
+        KeepClient keepClient = new KeepClient(config);
+        ManifestDecoder manifestDecoder = new ManifestDecoder();
+        KeepWebApiClient keepWebApiClient = new KeepWebApiClient(config);
+        fileDownloader = new FileDownloader(keepClient, manifestDecoder, collectionsApiClient, keepWebApiClient);
+        fileUploader = new FileUploader(keepClient, collectionsApiClient, config);
+    }
+
+    /**
+     * This method downloads single file from collection using Arvados Keep-Web.
+     * File is saved on a drive in specified location and returned.
+     *
+     * @param filePathName         path to the file in collection. If requested file is stored
+     *                             directly in collection (not within its subdirectory) this
+     *                             would be just the name of file (ex. 'file.txt').
+     *                             Otherwise full file path must be passed (ex. 'folder/file.txt')
+     * @param collectionUuid       uuid of collection containing requested file
+     * @param pathToDownloadFolder path to location in which file should be saved.
+     *                             Passed location must be a directory in which file of
+     *                             that name does not already exist.
+     * @return downloaded file
+     */
+    public File downloadFile(String filePathName, String collectionUuid, String pathToDownloadFolder) {
+        return fileDownloader.downloadSingleFileUsingKeepWeb(filePathName, collectionUuid, pathToDownloadFolder);
+    }
+
+    /**
+     * This method downloads all files from collection.
+     * Directory named by collection uuid is created in specified location,
+     * files are saved on a drive in this directory and list with downloaded
+     * files is returned.
+     *
+     * @param collectionUuid       uuid of collection from which files are downloaded
+     * @param pathToDownloadFolder path to location in which files should be saved.
+     *                             New folder named by collection uuid, containing
+     *                             downloaded files, is created in this location.
+     *                             Passed location must be a directory in which folder
+     *                             of that name does not already exist.
+     * @param usingKeepWeb         if set to true files will be downloaded using Keep Web.
+     *                             If set to false files will be downloaded using Keep Server API.
+     * @return list containing downloaded files
+     */
+    public List<File> downloadCollectionFiles(String collectionUuid, String pathToDownloadFolder, boolean usingKeepWeb) {
+        if (usingKeepWeb)
+            return fileDownloader.downloadFilesFromCollectionUsingKeepWeb(collectionUuid, pathToDownloadFolder);
+        return fileDownloader.downloadFilesFromCollection(collectionUuid, pathToDownloadFolder);
+    }
+
+    /**
+     * Lists all FileTokens (objects containing information about files) for
+     * specified collection.
+     * Information in each FileToken includes file path, name, size and position
+     * in data stream
+     *
+     * @param collectionUuid uuid of collection for which FileTokens are listed
+     * @return list containing FileTokens for each file in specified collection
+     */
+    public List<FileToken> listFileInfoFromCollection(String collectionUuid) {
+        return fileDownloader.listFileInfoFromCollection(collectionUuid);
+    }
+
+    /**
+     * Creates and uploads new collection containing passed files.
+     * Created collection has a default name and is uploaded to user's 'Home' project.
+     *
+     * @see ArvadosFacade#upload(List, String, String)
+     * @param files    list of files to be uploaded within new collection
+     * @return collection object mapped from JSON that is returned from server after successful upload
+     */
+    public Collection upload(List<File> files) {
+        return upload(files, null, null);
+    }
+
+    /**
+     * Creates and uploads new collection containing a single file.
+     * Created collection has a default name and is uploaded to user's 'Home' project.
+     *
+     * @see ArvadosFacade#upload(List, String, String)
+     * @param file file to be uploaded
+     * @return collection object mapped from JSON that is returned from server after successful upload
+     */
+    public Collection upload(File file) {
+        return upload(Collections.singletonList(file), null, null);
+    }
+
+    /**
+     * Uploads new collection with specified name and containing selected files
+     * to an existing project.
+     *
+     * @param sourceFiles    list of files to be uploaded within new collection
+     * @param collectionName name for the newly created collection.
+     *                       Collection with that name cannot be already created
+     *                       in specified project. If null is passed
+     *                       then collection name is set to default, containing
+     *                       phrase 'New Collection' and a timestamp.
+     * @param projectUuid    uuid of the project in which created collection is to be included.
+     *                       If null is passed then collection is uploaded to user's 'Home' project.
+     * @return collection object mapped from JSON that is returned from server after successful upload
+     */
+    public Collection upload(List<File> sourceFiles, String collectionName, String projectUuid) {
+        return fileUploader.upload(sourceFiles, collectionName, projectUuid);
+    }
+
+    /**
+     * Uploads a file to a specified collection.
+     *
+     * @see ArvadosFacade#uploadToExistingCollection(List, String)
+     * @param file           file to be uploaded to existing collection. Filenames must be unique
+     *                       in comparison with files already existing within collection.
+     * @param collectionUUID UUID of collection to which files should be uploaded
+     * @return collection object mapped from JSON that is returned from server after successful upload
+     */
+    public Collection uploadToExistingCollection(File file, String collectionUUID) {
+        return fileUploader.uploadToExistingCollection(Collections.singletonList(file), collectionUUID);
+    }
+
+    /**
+     * Uploads multiple files to an existing collection.
+     *
+     * @param files          list of files to be uploaded to existing collection.
+     *                       File names must be unique - both within passed list and
+     *                       in comparison with files already existing within collection.
+     * @param collectionUUID UUID of collection to which files should be uploaded
+     * @return collection object mapped from JSON that is returned from server after successful upload
+     */
+    public Collection uploadToExistingCollection(List<File> files, String collectionUUID) {
+        return fileUploader.uploadToExistingCollection(files, collectionUUID);
+    }
+
+    /**
+     * Creates and uploads new empty collection to specified project.
+     *
+     * @param collectionName name for the newly created collection.
+     *                       Collection with that name cannot be already created
+     *                       in specified project.
+     * @param projectUuid    uuid of project that will contain uploaded empty collection.
+     *                       To select home project pass current user's uuid from getCurrentUser()
+     * @return collection object mapped from JSON that is returned from server after successful upload
+     * @see ArvadosFacade#getCurrentUser()
+     */
+    public Collection createEmptyCollection(String collectionName, String projectUuid) {
+        Collection collection = new Collection();
+        collection.setOwnerUuid(projectUuid);
+        collection.setName(collectionName);
+        return collectionsApiClient.create(collection);
+    }
+
+    /**
+     * Returns current user information based on Api Token provided via configuration
+     *
+     * @return user object mapped from JSON that is returned from server based on provided Api Token.
+     * It contains information about user who has this token assigned.
+     */
+    public User getCurrentUser() {
+        return usersApiClient.current();
+    }
+
+    /**
+     * Gets uuid of current user based on api Token provided in configuration and uses it to list all
+     * projects that this user owns in Arvados.
+     *
+     * @return GroupList containing all groups that current user is owner of.
+     * @see ArvadosFacade#getCurrentUser()
+     */
+    public GroupList showGroupsOwnedByCurrentUser() {
+        ListArgument listArgument = ListArgument.builder()
+                .filters(Arrays.asList(
+                        Filter.of("owner_uuid", Filter.Operator.LIKE, getCurrentUser().getUuid()),
+                        Filter.of("group_class", Filter.Operator.IN, Lists.newArrayList(PROJECT, SUBPROJECT)
+                        )))
+                .build();
+        GroupList groupList = groupsApiClient.list(listArgument);
+        log.debug("Groups owned by user:");
+        groupList.getItems().forEach(m -> log.debug(m.getUuid() + " -- " + m.getName()));
+
+        return groupList;
+    }
+
+    /**
+     * Gets uuid of current user based on api Token provided in configuration and uses it to list all
+     * projects that this user has read access to in Arvados.
+     *
+     * @return GroupList containing all groups that current user has read access to.
+     */
+    public GroupList showGroupsAccessibleByCurrentUser() {
+        ListArgument listArgument = ListArgument.builder()
+                .filters(Collections.singletonList(
+                        Filter.of("group_class", Filter.Operator.IN, Lists.newArrayList(PROJECT, SUBPROJECT)
+                        )))
+                .build();
+        GroupList groupList = groupsApiClient.list(listArgument);
+        log.debug("Groups accessible by user:");
+        groupList.getItems().forEach(m -> log.debug(m.getUuid() + " -- " + m.getName()));
+
+        return groupList;
+    }
+
+    /**
+     * Filters all collections from selected project and returns list of those that contain passed String in their name.
+     * Operator "LIKE" is used so in order to obtain certain collection it is sufficient to pass just part of its name.
+     * Returned collections in collectionList are ordered by date of creation (starting from oldest one).
+     *
+     * @param collectionName collections containing this param in their name will be returned.
+     *                       Passing a wildcard is possible - for example passing "a%" searches for
+     *                       all collections starting with "a".
+     * @param projectUuid    uuid of project in which will be searched for collections with given name. To search home
+     *                       project provide user uuid (from getCurrentUser())
+     * @return object CollectionList containing all collections matching specified name criteria
+     * @see ArvadosFacade#getCurrentUser()
+     */
+    public CollectionList getCollectionsFromProjectByName(String collectionName, String projectUuid) {
+        ListArgument listArgument = ListArgument.builder()
+                .filters(Arrays.asList(
+                        Filter.of("owner_uuid", Filter.Operator.LIKE, projectUuid),
+                        Filter.of("name", Filter.Operator.LIKE, collectionName)
+                ))
+                .order(Collections.singletonList("created_at"))
+                .build();
+
+        return collectionsApiClient.list(listArgument);
+    }
+
+    /**
+     * Creates new project that will be a subproject of "home" for current user.
+     *
+     * @param projectName name for the newly created project
+     * @return Group object containing information about created project
+     * (mapped from JSON returned from server after creating the project)
+     */
+    public Group createNewProject(String projectName) {
+        Group project = new Group();
+        project.setName(projectName);
+        project.setGroupClass(PROJECT);
+        Group createdProject = groupsApiClient.create(project);
+        log.debug("Project " + createdProject.getName() + " created with UUID: " + createdProject.getUuid());
+        return createdProject;
+    }
+
+    /**
+     * Deletes collection with specified uuid.
+     *
+     * @param collectionUuid uuid of collection to be deleted. User whose token is provided in configuration
+     *                       must be authorized to delete such collection.
+     * @return collection object with deleted collection (mapped from JSON returned from server after deleting the collection)
+     */
+    public Collection deleteCollection(String collectionUuid) {
+        Collection deletedCollection = collectionsApiClient.delete(collectionUuid);
+        log.debug("Collection: " + collectionUuid + " deleted.");
+        return deletedCollection;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/CollectionFactory.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/CollectionFactory.java
new file mode 100644 (file)
index 0000000..25379f5
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import org.arvados.client.api.client.GroupsApiClient;
+import org.arvados.client.api.client.UsersApiClient;
+import org.arvados.client.exception.ArvadosApiException;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.common.Patterns;
+import org.arvados.client.config.FileConfigProvider;
+import org.arvados.client.config.ConfigProvider;
+import org.arvados.client.exception.ArvadosClientException;
+
+import java.io.File;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.List;
+import java.util.Optional;
+
+public class CollectionFactory {
+
+    private ConfigProvider config;
+    private UsersApiClient usersApiClient;
+    private GroupsApiClient groupsApiClient;
+
+    private final String name;
+    private final String projectUuid;
+    private final List<File> manifestFiles;
+    private final List<String> manifestLocators;
+
+    private CollectionFactory(ConfigProvider config, String name, String projectUuid, List<File> manifestFiles, List<String> manifestLocators) {
+        this.name = name;
+        this.projectUuid = projectUuid;
+        this.manifestFiles = manifestFiles;
+        this.manifestLocators = manifestLocators;
+        this.config = config;
+        setApiClients();
+    }
+
+    public static CollectionFactoryBuilder builder() {
+        return new CollectionFactoryBuilder();
+    }
+
+    private void setApiClients() {
+        if(this.config == null) this.config = new FileConfigProvider();
+
+        this.usersApiClient = new UsersApiClient(config);
+        this.groupsApiClient = new GroupsApiClient(config);
+    }
+
+    public Collection create() {
+        ManifestFactory manifestFactory = ManifestFactory.builder()
+            .files(manifestFiles)
+            .locators(manifestLocators)
+            .build();
+        String manifest = manifestFactory.create();
+        
+        Collection newCollection = new Collection();
+        newCollection.setName(getNameOrDefault(name));
+        newCollection.setManifestText(manifest);
+        newCollection.setOwnerUuid(getDesiredProjectUuid(projectUuid));
+
+        return newCollection;
+    }
+
+    private String getNameOrDefault(String name) {
+        return Optional.ofNullable(name).orElseGet(() -> {
+            LocalDateTime dateTime = LocalDateTime.now();
+            DateTimeFormatter formatter = DateTimeFormatter.ofPattern("Y-MM-dd HH:mm:ss.SSS");
+            return String.format("New Collection (%s)", dateTime.format(formatter));
+        });
+    }
+
+    public String getDesiredProjectUuid(String projectUuid) {
+        try {
+            if (projectUuid == null || projectUuid.length() == 0){
+                return usersApiClient.current().getUuid();
+            } else if (projectUuid.matches(Patterns.USER_UUID_PATTERN)) {
+                return usersApiClient.get(projectUuid).getUuid();
+            } else if (projectUuid.matches(Patterns.GROUP_UUID_PATTERN)) {
+                return groupsApiClient.get(projectUuid).getUuid();
+            }
+        } catch (ArvadosApiException e) {
+            throw new ArvadosClientException(String.format("An error occurred while getting project by UUID %s", projectUuid));
+        }
+        throw new ArvadosClientException(String.format("No project with %s UUID found", projectUuid));
+    }
+
+    public static class CollectionFactoryBuilder {
+        private ConfigProvider config;
+        private String name;
+        private String projectUuid;
+        private List<File> manifestFiles;
+        private List<String> manifestLocators;
+
+        CollectionFactoryBuilder() {
+        }
+
+        public CollectionFactoryBuilder config(ConfigProvider config) {
+            this.config = config;
+            return this;
+        }
+
+        public CollectionFactoryBuilder name(String name) {
+            this.name = name;
+            return this;
+        }
+
+        public CollectionFactoryBuilder projectUuid(String projectUuid) {
+            this.projectUuid = projectUuid;
+            return this;
+        }
+
+        public CollectionFactoryBuilder manifestFiles(List<File> manifestFiles) {
+            this.manifestFiles = manifestFiles;
+            return this;
+        }
+
+        public CollectionFactoryBuilder manifestLocators(List<String> manifestLocators) {
+            this.manifestLocators = manifestLocators;
+            return this;
+        }
+
+        public CollectionFactory build() {
+            return new CollectionFactory(config, name, projectUuid, manifestFiles, manifestLocators);
+        }
+
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/FileToken.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/FileToken.java
new file mode 100644 (file)
index 0000000..b41ccd3
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import com.google.common.base.Strings;
+import org.arvados.client.common.Characters;
+
+public class FileToken {
+
+    private int filePosition;
+    private int fileSize;
+    private String fileName;
+    private String path;
+
+    public FileToken(String fileTokenInfo) {
+        splitFileTokenInfo(fileTokenInfo);
+    }
+
+    public FileToken(String fileTokenInfo, String path) {
+        splitFileTokenInfo(fileTokenInfo);
+        this.path = path;
+    }
+
+    private void splitFileTokenInfo(String fileTokenInfo) {
+        String[] tokenPieces = fileTokenInfo.split(":");
+        this.filePosition = Integer.parseInt(tokenPieces[0]);
+        this.fileSize = Integer.parseInt(tokenPieces[1]);
+        this.fileName = tokenPieces[2].replace(Characters.SPACE, " ");
+    }
+
+    @Override
+    public String toString() {
+        return filePosition + ":" + fileSize + ":" + fileName;
+    }
+
+    public String getFullPath() {
+        return Strings.isNullOrEmpty(path) ? fileName : path + fileName;
+    }
+
+    public int getFilePosition() {
+        return this.filePosition;
+    }
+
+    public int getFileSize() {
+        return this.fileSize;
+    }
+
+    public String getFileName() {
+        return this.fileName;
+    }
+
+    public String getPath() {
+        return this.path;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestDecoder.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestDecoder.java
new file mode 100644 (file)
index 0000000..6a76a4e
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import org.arvados.client.common.Characters;
+import org.arvados.client.exception.ArvadosClientException;
+import org.arvados.client.logic.keep.KeepLocator;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+
+import static java.util.stream.Collectors.toList;
+import static org.arvados.client.common.Patterns.FILE_TOKEN_PATTERN;
+import static org.arvados.client.common.Patterns.LOCATOR_PATTERN;
+
+public class ManifestDecoder {
+
+    public List<ManifestStream> decode(String manifestText) {
+
+        if (manifestText == null || manifestText.isEmpty()) {
+            throw new ArvadosClientException("Manifest text cannot be empty.");
+        }
+
+        List<String> manifestStreams = new ArrayList<>(Arrays.asList(manifestText.split("\\n")));
+        if (!manifestStreams.get(0).startsWith(". ")) {
+            throw new ArvadosClientException("Invalid first path component (expecting \".\")");
+        }
+
+        return manifestStreams.stream()
+                .map(this::decodeSingleManifestStream)
+                .collect(toList());
+    }
+
+    private ManifestStream decodeSingleManifestStream(String manifestStream) {
+        Objects.requireNonNull(manifestStream, "Manifest stream cannot be empty.");
+
+        LinkedList<String> manifestPieces = new LinkedList<>(Arrays.asList(manifestStream.split("\\s+")));
+        String streamName = manifestPieces.poll();
+        String path = ".".equals(streamName) ? "" : streamName.substring(2).concat(Characters.SLASH);
+
+        List<KeepLocator> keepLocators = manifestPieces
+                .stream()
+                .filter(p -> p.matches(LOCATOR_PATTERN))
+                .map(this::getKeepLocator)
+                .collect(toList());
+
+
+        List<FileToken> fileTokens = manifestPieces.stream()
+                .skip(keepLocators.size())
+                .filter(p -> p.matches(FILE_TOKEN_PATTERN))
+                .map(p -> new FileToken(p, path))
+                .collect(toList());
+
+        return new ManifestStream(streamName, keepLocators, fileTokens);
+
+    }
+
+    private KeepLocator getKeepLocator(String locatorString ) {
+        try {
+            return new KeepLocator(locatorString);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestFactory.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestFactory.java
new file mode 100644 (file)
index 0000000..96d605d
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import com.google.common.collect.ImmutableList;
+import org.arvados.client.common.Characters;
+
+import java.io.File;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Builds a single-stream manifest line for the root stream ("."): the block
+ * locators followed by one "position:size:filename" token per file, joined
+ * with spaces and terminated by a newline.
+ */
+public class ManifestFactory {
+
+    private Collection<File> files;
+    private List<String> locators;
+
+    ManifestFactory(Collection<File> files, List<String> locators) {
+        this.files = files;
+        this.locators = locators;
+    }
+
+    public static ManifestFactoryBuilder builder() {
+        return new ManifestFactoryBuilder();
+    }
+
+    // Produces ". <locators...> <pos:size:name>...\n". File positions are
+    // cumulative byte offsets into the concatenated data stream, so iteration
+    // order of 'files' must match the order the data was written to Keep.
+    // Spaces in file names are escaped with Characters.SPACE per the manifest
+    // format.
+    public String create() {
+        ImmutableList.Builder<String> builder = new ImmutableList.Builder<String>()
+                .add(Characters.DOT)
+                .addAll(locators);
+        long filePosition = 0;
+        for (File file : files) {
+            builder.add(String.format("%d:%d:%s", filePosition, file.length(), file.getName().replace(" ", Characters.SPACE)));
+            filePosition += file.length();
+        }
+        String manifest = builder.build().stream().collect(Collectors.joining(" ")).concat(Characters.NEW_LINE);
+        return manifest;
+    }
+
+    public static class ManifestFactoryBuilder {
+        private Collection<File> files;
+        private List<String> locators;
+
+        ManifestFactoryBuilder() {
+        }
+
+        public ManifestFactory.ManifestFactoryBuilder files(Collection<File> files) {
+            this.files = files;
+            return this;
+        }
+
+        public ManifestFactory.ManifestFactoryBuilder locators(List<String> locators) {
+            this.locators = locators;
+            return this;
+        }
+
+        public ManifestFactory build() {
+            return new ManifestFactory(files, locators);
+        }
+
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestStream.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/collection/ManifestStream.java
new file mode 100644 (file)
index 0000000..3044030
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import org.arvados.client.logic.keep.KeepLocator;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class ManifestStream {
+
+    private String streamName;
+    private List<KeepLocator> keepLocators;
+    private List<FileToken> fileTokens;
+
+    public ManifestStream(String streamName, List<KeepLocator> keepLocators, List<FileToken> fileTokens) {
+        this.streamName = streamName;
+        this.keepLocators = keepLocators;
+        this.fileTokens = fileTokens;
+    }
+
+    @Override
+    public String toString() {
+        return streamName + " " + Stream.concat(keepLocators.stream().map(KeepLocator::toString), fileTokens.stream().map(FileToken::toString))
+                .collect(Collectors.joining(" "));
+    }
+
+    public String getStreamName() {
+        return this.streamName;
+    }
+
+    public List<KeepLocator> getKeepLocators() {
+        return this.keepLocators;
+    }
+
+    public List<FileToken> getFileTokens() {
+        return this.fileTokens;
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
new file mode 100644 (file)
index 0000000..1f694f2
--- /dev/null
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import com.google.common.collect.Lists;
+import org.arvados.client.api.client.CollectionsApiClient;
+import org.arvados.client.api.client.KeepWebApiClient;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.common.Characters;
+import org.arvados.client.exception.ArvadosClientException;
+import org.arvados.client.logic.collection.FileToken;
+import org.arvados.client.logic.collection.ManifestDecoder;
+import org.arvados.client.logic.collection.ManifestStream;
+import org.arvados.client.logic.keep.exception.DownloadFolderAlreadyExistsException;
+import org.arvados.client.logic.keep.exception.FileAlreadyExistsException;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class FileDownloader {
+
+    private final KeepClient keepClient;
+    private final ManifestDecoder manifestDecoder;
+    private final CollectionsApiClient collectionsApiClient;
+    private final KeepWebApiClient keepWebApiClient;
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(FileDownloader.class);
+
+    public FileDownloader(KeepClient keepClient, ManifestDecoder manifestDecoder, CollectionsApiClient collectionsApiClient, KeepWebApiClient keepWebApiClient) {
+        this.keepClient = keepClient;
+        this.manifestDecoder = manifestDecoder;
+        this.collectionsApiClient = collectionsApiClient;
+        this.keepWebApiClient = keepWebApiClient;
+    }
+
+    public List<FileToken> listFileInfoFromCollection(String collectionUuid) {
+        Collection requestedCollection = collectionsApiClient.get(collectionUuid);
+        String manifestText = requestedCollection.getManifestText();
+
+        // decode manifest text and get list of all FileTokens for this collection
+        return manifestDecoder.decode(manifestText)
+                .stream()
+                .flatMap(p -> p.getFileTokens().stream())
+                .collect(Collectors.toList());
+    }
+
+    public File downloadSingleFileUsingKeepWeb(String filePathName, String collectionUuid, String pathToDownloadFolder) {
+        FileToken fileToken = getFileTokenFromCollection(filePathName, collectionUuid);
+        if (fileToken == null) {
+            throw new ArvadosClientException(String.format("%s not found in Collection with UUID %s", filePathName, collectionUuid));
+        }
+
+        File downloadedFile = checkIfFileExistsInTargetLocation(fileToken, pathToDownloadFolder);
+        try (FileOutputStream fos = new FileOutputStream(downloadedFile)) {
+            fos.write(keepWebApiClient.download(collectionUuid, filePathName));
+        } catch (IOException e) {
+            throw new ArvadosClientException(String.format("Unable to write down file %s", fileToken.getFileName()), e);
+        }
+        return downloadedFile;
+    }
+
+    public List<File> downloadFilesFromCollectionUsingKeepWeb(String collectionUuid, String pathToDownloadFolder) {
+        String collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder).getAbsolutePath();
+        List<FileToken> fileTokens = listFileInfoFromCollection(collectionUuid);
+
+        List<CompletableFuture<File>> futures = Lists.newArrayList();
+        for (FileToken fileToken : fileTokens) {
+            futures.add(CompletableFuture.supplyAsync(() -> this.downloadOneFileFromCollectionUsingKeepWeb(fileToken, collectionUuid, collectionTargetDir)));
+        }
+
+        @SuppressWarnings("unchecked")
+        CompletableFuture<File>[] array = futures.toArray(new CompletableFuture[0]);
+        return Stream.of(array)
+                .map(CompletableFuture::join).collect(Collectors.toList());
+    }
+
+    private FileToken getFileTokenFromCollection(String filePathName, String collectionUuid) {
+        return listFileInfoFromCollection(collectionUuid)
+                .stream()
+                .filter(p -> (p.getFullPath()).equals(filePathName))
+                .findFirst()
+                .orElse(null);
+    }
+
+    private File checkIfFileExistsInTargetLocation(FileToken fileToken, String pathToDownloadFolder) {
+        String fileName = fileToken.getFileName();
+
+        File downloadFile = new File(pathToDownloadFolder + Characters.SLASH + fileName);
+        if (downloadFile.exists()) {
+            throw new FileAlreadyExistsException(String.format("File %s exists in location %s", fileName, pathToDownloadFolder));
+        } else {
+            return downloadFile;
+        }
+    }
+
+    private File downloadOneFileFromCollectionUsingKeepWeb(FileToken fileToken, String collectionUuid, String pathToDownloadFolder) {
+        String filePathName = fileToken.getPath() + fileToken.getFileName();
+        File downloadedFile = new File(pathToDownloadFolder + Characters.SLASH + filePathName);
+        downloadedFile.getParentFile().mkdirs();
+
+        try (FileOutputStream fos = new FileOutputStream(downloadedFile)) {
+            fos.write(keepWebApiClient.download(collectionUuid, filePathName));
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        return downloadedFile;
+    }
+
+    public List<File> downloadFilesFromCollection(String collectionUuid, String pathToDownloadFolder) {
+
+        // download requested collection and extract manifest text
+        Collection requestedCollection = collectionsApiClient.get(collectionUuid);
+        String manifestText = requestedCollection.getManifestText();
+
+        // if directory with this collectionUUID does not exist - create one
+        // if exists - abort (throw exception)
+        File collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder);
+
+        // decode manifest text and create list of ManifestStream objects containing KeepLocators and FileTokens
+        List<ManifestStream> manifestStreams = manifestDecoder.decode(manifestText);
+
+        //list of all downloaded files that will be returned by this method
+        List<File> downloadedFilesFromCollection = new ArrayList<>();
+
+        // download files for each manifest stream
+        for (ManifestStream manifestStream : manifestStreams)
+            downloadedFilesFromCollection.addAll(downloadFilesFromSingleManifestStream(manifestStream, collectionTargetDir));
+
+        log.debug(String.format("Total of: %d files downloaded", downloadedFilesFromCollection.size()));
+        return downloadedFilesFromCollection;
+    }
+
+    private File setTargetDirectory(String collectionUUID, String pathToDownloadFolder) {
+        //local directory to save downloaded files
+        File collectionTargetDir = new File(pathToDownloadFolder + Characters.SLASH + collectionUUID);
+        if (collectionTargetDir.exists()) {
+            throw new DownloadFolderAlreadyExistsException(String.format("Directory for collection UUID %s already exists", collectionUUID));
+        } else {
+            collectionTargetDir.mkdirs();
+        }
+        return collectionTargetDir;
+    }
+
+    private List<File> downloadFilesFromSingleManifestStream(ManifestStream manifestStream, File collectionTargetDir){
+        List<File> downloadedFiles = new ArrayList<>();
+        List<KeepLocator> keepLocators = manifestStream.getKeepLocators();
+        DownloadHelper downloadHelper = new DownloadHelper(keepLocators);
+
+        for (FileToken fileToken : manifestStream.getFileTokens()) {
+            File downloadedFile = new File(collectionTargetDir.getAbsolutePath() + Characters.SLASH + fileToken.getFullPath()); //create file
+            downloadedFile.getParentFile().mkdirs();
+
+            try (FileOutputStream fos = new FileOutputStream(downloadedFile, true)) {
+                downloadHelper.setBytesToDownload(fileToken.getFileSize()); //update file size info
+
+                //this part needs to be repeated for each file until whole file is downloaded
+                do {
+                    downloadHelper.requestNewDataChunk(); //check if new data chunk needs to be downloaded
+                    downloadHelper.writeDownFile(fos); // download data from chunk
+                } while (downloadHelper.getBytesToDownload() != 0);
+
+            } catch (IOException | ArvadosClientException e) {
+                throw new ArvadosClientException(String.format("Unable to write down file %s", fileToken.getFileName()), e);
+            }
+
+            downloadedFiles.add(downloadedFile);
+            log.debug(String.format("File %d / %d downloaded from manifest stream",
+                    manifestStream.getFileTokens().indexOf(fileToken) + 1,
+                    manifestStream.getFileTokens().size()));
+        }
+        return downloadedFiles;
+    }
+
+    private class DownloadHelper {
+
+        // values for tracking file output streams and matching data chunks with initial files
+        int currentDataChunkNumber;
+        int bytesDownloadedFromChunk;
+        int bytesToDownload;
+        byte[] currentDataChunk;
+        boolean remainingDataInChunk;
+        final List<KeepLocator> keepLocators;
+
+        private DownloadHelper(List<KeepLocator> keepLocators) {
+            currentDataChunkNumber = -1;
+            bytesDownloadedFromChunk = 0;
+            remainingDataInChunk = false;
+            this.keepLocators = keepLocators;
+        }
+
+        private int getBytesToDownload() {
+            return bytesToDownload;
+        }
+
+        private void setBytesToDownload(int bytesToDownload) {
+            this.bytesToDownload = bytesToDownload;
+        }
+
+        private void requestNewDataChunk() {
+            if (!remainingDataInChunk) {
+                currentDataChunkNumber++;
+                if (currentDataChunkNumber < keepLocators.size()) {
+                    //swap data chunk for next one
+                    currentDataChunk = keepClient.getDataChunk(keepLocators.get(currentDataChunkNumber));
+                    log.debug(String.format("%d of %d data chunks from manifest stream downloaded", currentDataChunkNumber + 1, keepLocators.size()));
+                } else {
+                    throw new ArvadosClientException("Data chunk required for download is missing.");
+                }
+            }
+        }
+
+        private void writeDownFile(FileOutputStream fos) throws IOException {
+            //case 1: more bytes needed than available in current chunk (or whole current chunk needed) to download file
+            if (bytesToDownload >= currentDataChunk.length - bytesDownloadedFromChunk) {
+                writeDownWholeDataChunk(fos);
+            }
+            //case 2: current data chunk contains more bytes than is needed for this file
+            else {
+                writeDownDataChunkPartially(fos);
+            }
+        }
+
+        private void writeDownWholeDataChunk(FileOutputStream fos) throws IOException {
+            // write all remaining bytes from current chunk
+            fos.write(currentDataChunk, bytesDownloadedFromChunk, currentDataChunk.length - bytesDownloadedFromChunk);
+            //update bytesToDownload
+            bytesToDownload -= (currentDataChunk.length - bytesDownloadedFromChunk);
+            // set remaining data in chunk to false
+            remainingDataInChunk = false;
+            //reset bytesDownloadedFromChunk so that its set to 0 for the next chunk
+            bytesDownloadedFromChunk = 0;
+        }
+
+        private void writeDownDataChunkPartially(FileOutputStream fos) throws IOException {
+            //write all remaining bytes for this file from current chunk
+            fos.write(currentDataChunk, bytesDownloadedFromChunk, bytesToDownload);
+            // update number of bytes downloaded from this chunk
+            bytesDownloadedFromChunk += bytesToDownload;
+            // set remaining data in chunk to true
+            remainingDataInChunk = true;
+            // reset bytesToDownload to exit while loop and move to the next file
+            bytesToDownload = 0;
+        }
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileTransferHandler.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileTransferHandler.java
new file mode 100644 (file)
index 0000000..c6a8ad3
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import org.arvados.client.api.client.KeepServerApiClient;
+import org.arvados.client.exception.ArvadosApiException;
+import org.arvados.client.config.ConfigProvider;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.util.Map;
+
+public class FileTransferHandler {
+
+    private final String host;
+    private final KeepServerApiClient keepServerApiClient;
+    private final Map<String, String> headers;
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(FileTransferHandler.class);
+
+    public FileTransferHandler(String host, Map<String, String> headers, ConfigProvider config) {
+        this.host = host;
+        this.headers = headers;
+        this.keepServerApiClient = new KeepServerApiClient(config);
+    }
+
+    public String put(String hashString, File body) {
+        String url = host + hashString;
+        String locator = null;
+        try {
+            locator = keepServerApiClient.upload(url, headers, body);
+        } catch (ArvadosApiException e) {
+            log.error("Cannot upload file to Keep server.", e);
+        }
+        return locator;
+    }
+
+    public byte[] get(KeepLocator locator) {
+        return get(locator.stripped(), locator.permissionHint());
+    }
+
+    public byte[] get(String blockLocator, String authToken) {
+        String url = host + blockLocator + "+" + authToken;
+        try {
+            return keepServerApiClient.download(url);
+        } catch (ArvadosApiException e) {
+            log.error("Cannot download file from Keep server.", e);
+            return  null;
+        }
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileUploader.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileUploader.java
new file mode 100644 (file)
index 0000000..52e0f66
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import com.google.common.collect.Lists;
+import org.arvados.client.api.client.CollectionsApiClient;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.common.Characters;
+import org.arvados.client.config.ConfigProvider;
+import org.arvados.client.exception.ArvadosClientException;
+import org.arvados.client.logic.collection.CollectionFactory;
+import org.arvados.client.utils.FileMerge;
+import org.arvados.client.utils.FileSplit;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.UUID;
+
+import static java.util.stream.Collectors.toList;
+
+public class FileUploader {
+
+    private final KeepClient keepClient;
+    private final CollectionsApiClient collectionsApiClient;
+    private final ConfigProvider config;
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(FileUploader.class);
+
+    public FileUploader(KeepClient keepClient, CollectionsApiClient collectionsApiClient, ConfigProvider config) {
+        this.keepClient = keepClient;
+        this.collectionsApiClient = collectionsApiClient;
+        this.config = config;
+    }
+
+    public Collection upload(List<File> sourceFiles, String collectionName, String projectUuid) {
+        List<String> locators = uploadToKeep(sourceFiles);
+        CollectionFactory collectionFactory = CollectionFactory.builder()
+                .config(config)
+                .name(collectionName)
+                .projectUuid(projectUuid)
+                .manifestFiles(sourceFiles)
+                .manifestLocators(locators)
+                .build();
+
+        Collection newCollection = collectionFactory.create();
+        return collectionsApiClient.create(newCollection);
+    }
+
+    public Collection uploadToExistingCollection(List<File> files, String collectionUuid) {
+        List<String> locators = uploadToKeep(files);
+        Collection collectionBeforeUpload = collectionsApiClient.get(collectionUuid);
+        String oldManifest = collectionBeforeUpload.getManifestText();
+
+        CollectionFactory collectionFactory = CollectionFactory.builder()
+                .config(config)
+                .manifestFiles(files)
+                .manifestLocators(locators).build();
+
+        String newPartOfManifestText = collectionFactory.create().getManifestText();
+        String newManifest = oldManifest + newPartOfManifestText;
+
+        collectionBeforeUpload.setManifestText(newManifest);
+        return collectionsApiClient.update(collectionBeforeUpload);
+    }
+
+    private List<String> uploadToKeep(List<File> files) {
+        File targetDir = config.getFileSplitDirectory();
+        File combinedFile = new File(targetDir.getAbsolutePath() + Characters.SLASH + UUID.randomUUID());
+        List<File> chunks;
+        try {
+            FileMerge.merge(files, combinedFile);
+            chunks = FileSplit.split(combinedFile, targetDir, config.getFileSplitSize());
+        } catch (IOException e) {
+            throw new ArvadosClientException("Cannot create file chunks for upload", e);
+        }
+        combinedFile.delete();
+
+        int copies = config.getNumberOfCopies();
+        int numRetries = config.getNumberOfRetries();
+
+        List<String> locators = Lists.newArrayList();
+        for (File chunk : chunks) {
+            try {
+                locators.add(keepClient.put(chunk, copies, numRetries));
+            } catch (ArvadosClientException e) {
+                log.error("Problem occurred while uploading chunk file {}", chunk.getName(), e);
+                throw e;
+            }
+        }
+        return locators.stream()
+                .filter(Objects::nonNull)
+                .collect(toList());
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepClient.java
new file mode 100644 (file)
index 0000000..9cc732d
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.io.FileUtils;
+import org.arvados.client.api.client.KeepServicesApiClient;
+import org.arvados.client.api.model.KeepService;
+import org.arvados.client.api.model.KeepServiceList;
+import org.arvados.client.common.Characters;
+import org.arvados.client.common.Headers;
+import org.arvados.client.config.ConfigProvider;
+import org.arvados.client.exception.ArvadosApiException;
+import org.arvados.client.exception.ArvadosClientException;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class KeepClient {
+
+    private final KeepServicesApiClient keepServicesApiClient;
+    private final Logger log = org.slf4j.LoggerFactory.getLogger(KeepClient.class);
+    private List<KeepService> keepServices;
+    private List<KeepService> writableServices;
+    private Map<String, KeepService> gatewayServices;
+    private final String apiToken;
+    private Integer maxReplicasPerService;
+    private final ConfigProvider config;
+
+    public KeepClient(ConfigProvider config) {
+        this.config = config;
+        keepServicesApiClient = new KeepServicesApiClient(config);
+        apiToken = config.getApiToken();
+    }
+
+    public byte[] getDataChunk(KeepLocator keepLocator) {
+
+        Map<String, String> headers = new HashMap<>();
+        Map<String, FileTransferHandler> rootsMap = new HashMap<>();
+
+        List<String> sortedRoots = mapNewServices(rootsMap, keepLocator, false, false, headers);
+
+        byte[] dataChunk = sortedRoots
+                .stream()
+                .map(rootsMap::get)
+                .map(r -> r.get(keepLocator))
+                .filter(Objects::nonNull)
+                .findFirst()
+                .orElse(null);
+
+        if (dataChunk == null) {
+            throw new ArvadosClientException("No server responding. Unable to download data chunk.");
+        }
+
+        return dataChunk;
+    }
+
+    public String put(File data, int copies, int numRetries) {
+
+        byte[] fileBytes;
+        try {
+            fileBytes = FileUtils.readFileToByteArray(data);
+        } catch (IOException e) {
+            throw new ArvadosClientException("An error occurred while reading data chunk", e);
+        }
+
+        String dataHash = DigestUtils.md5Hex(fileBytes);
+        String locatorString = String.format("%s+%d", dataHash, data.length());
+
+        if (copies < 1) {
+            return locatorString;
+        }
+        KeepLocator locator = new KeepLocator(locatorString);
+
+        // Tell the proxy how many copies we want it to store
+        Map<String, String> headers = new HashMap<>();
+        headers.put(Headers.X_KEEP_DESIRED_REPLICAS, String.valueOf(copies));
+
+        Map<String, FileTransferHandler> rootsMap = new HashMap<>();
+        List<String> sortedRoots = mapNewServices(rootsMap, locator, false, true, headers);
+
+        int numThreads = 0;
+        if (maxReplicasPerService == null || maxReplicasPerService >= copies) {
+            numThreads = 1;
+        } else {
+            numThreads = ((Double) Math.ceil(1.0 * copies / maxReplicasPerService)).intValue();
+        }
+        log.debug("Pool max threads is {}", numThreads);
+
+        List<CompletableFuture<String>> futures = Lists.newArrayList();
+        for (int i = 0; i < numThreads; i++) {
+            String root = sortedRoots.get(i);
+            FileTransferHandler keepServiceLocal = rootsMap.get(root);
+            futures.add(CompletableFuture.supplyAsync(() -> keepServiceLocal.put(dataHash, data)));
+        }
+
+        @SuppressWarnings("unchecked")
+        CompletableFuture<String>[] array = futures.toArray(new CompletableFuture[0]);
+
+        return Stream.of(array)
+                .map(CompletableFuture::join)
+                .reduce((a, b) -> b)
+                .orElse(null);
+    }
+
+    private List<String> mapNewServices(Map<String, FileTransferHandler> rootsMap, KeepLocator locator,
+                                        boolean forceRebuild, boolean needWritable, Map<String, String> headers) {
+
+        headers.putIfAbsent("Authorization", String.format("OAuth2 %s", apiToken));
+        List<String> localRoots = weightedServiceRoots(locator, forceRebuild, needWritable);
+        for (String root : localRoots) {
+            FileTransferHandler keepServiceLocal = new FileTransferHandler(root, headers, config);
+            rootsMap.putIfAbsent(root, keepServiceLocal);
+        }
+        return localRoots;
+    }
+
+    /**
+     * Return an array of Keep service endpoints, in the order in which they should be probed when reading or writing
+     * data with the given hash+hints.
+     */
+    private List<String> weightedServiceRoots(KeepLocator locator, boolean forceRebuild, boolean needWritable) {
+
+        buildServicesList(forceRebuild);
+
+        List<String> sortedRoots = new ArrayList<>();
+
+        // Use the services indicated by the given +K@... remote
+        // service hints, if any are present and can be resolved to a
+        // URI.
+        //
+        for (String hint : locator.getHints()) {
+            if (hint.startsWith("K@")) {
+                if (hint.length() == 7) {
+                    sortedRoots.add(String.format("https://keep.%s.arvadosapi.com/", hint.substring(2)));
+                } else if (hint.length() == 29) {
+                    KeepService svc = gatewayServices.get(hint.substring(2));
+                    if (svc != null) {
+                        sortedRoots.add(svc.getServiceRoot());
+                    }
+                }
+            }
+        }
+
+        // Sort the available local services by weight (heaviest first)
+        // for this locator, and return their service_roots (base URIs)
+        // in that order.
+        List<KeepService> useServices = keepServices;
+        if (needWritable) {
+            useServices = writableServices;
+        }
+        anyNonDiskServices(useServices);
+
+        sortedRoots.addAll(useServices
+                .stream()
+                .sorted((ks1, ks2) -> serviceWeight(locator.getMd5sum(), ks2.getUuid())
+                        .compareTo(serviceWeight(locator.getMd5sum(), ks1.getUuid())))
+                .map(KeepService::getServiceRoot)
+                .collect(Collectors.toList()));
+
+        return sortedRoots;
+    }
+
+    private void buildServicesList(boolean forceRebuild) {
+        if (keepServices != null && !forceRebuild) {
+            return;
+        }
+        KeepServiceList keepServiceList;
+        try {
+            keepServiceList = keepServicesApiClient.accessible();
+        } catch (ArvadosApiException e) {
+            throw new ArvadosClientException("Cannot obtain list of accessible keep services");
+        }
+        // Gateway services are only used when specified by UUID,
+        // so there's nothing to gain by filtering them by
+        // service_type.
+        gatewayServices = keepServiceList.getItems().stream().collect(Collectors.toMap(KeepService::getUuid, Function.identity()));
+
+        if (gatewayServices.isEmpty()) {
+            throw new ArvadosClientException("No gateway services available!");
+        }
+
+        // Precompute the base URI for each service.
+        for (KeepService keepService : gatewayServices.values()) {
+            String serviceHost = keepService.getServiceHost();
+            if (!serviceHost.startsWith("[") && serviceHost.contains(Characters.COLON)) {
+                // IPv6 URIs must be formatted like http://[::1]:80/...
+                serviceHost = String.format("[%s]", serviceHost);
+            }
+
+            String protocol = keepService.getServiceSslFlag() ? "https" : "http";
+            String serviceRoot = String.format("%s://%s:%d/", protocol, serviceHost, keepService.getServicePort());
+            keepService.setServiceRoot(serviceRoot);
+        }
+
+        keepServices = gatewayServices.values().stream().filter(ks -> !ks.getServiceType().startsWith("gateway:")).collect(Collectors.toList());
+        writableServices = keepServices.stream().filter(ks -> !ks.getReadOnly()).collect(Collectors.toList());
+
+        // For disk type services, max_replicas_per_service is 1
+        // It is unknown (unlimited) for other service types.
+        if (anyNonDiskServices(writableServices)) {
+            maxReplicasPerService = null;
+        } else {
+            maxReplicasPerService = 1;
+        }
+    }
+
+    private Boolean anyNonDiskServices(List<KeepService> useServices) {
+        return useServices.stream().anyMatch(ks -> !ks.getServiceType().equals("disk"));
+    }
+
+    /**
+     * Compute the weight of a Keep service endpoint for a data block with a known hash.
+     * <p>
+     * The weight is md5(h + u) where u is the last 15 characters of the service endpoint's UUID.
+     */
+    private static String serviceWeight(String dataHash, String serviceUuid) {
+        String shortenedUuid;
+        if (serviceUuid != null && serviceUuid.length() >= 15) {
+            int substringIndex = serviceUuid.length() - 15;
+            shortenedUuid = serviceUuid.substring(substringIndex);
+        } else {
+            shortenedUuid = (serviceUuid == null) ? "" : serviceUuid;
+        }
+        return DigestUtils.md5Hex(dataHash + shortenedUuid);
+    }
+
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepLocator.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/KeepLocator.java
new file mode 100644 (file)
index 0000000..4d3d425
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import org.arvados.client.exception.ArvadosClientException;
+
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneOffset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.arvados.client.common.Patterns.HINT_PATTERN;
+
+public class KeepLocator {
+
+    private final List<String> hints = new ArrayList<>();
+    private String permSig;
+    private LocalDateTime permExpiry;
+    private final String md5sum;
+    private final Integer size;
+
+    public KeepLocator(String locatorString) {
+        LinkedList<String> pieces = new LinkedList<>(Arrays.asList(locatorString.split("\\+")));
+
+        md5sum = pieces.poll();
+        size = Integer.valueOf(Objects.requireNonNull(pieces.poll()));
+
+        for (String hint : pieces) {
+            if (!hint.matches(HINT_PATTERN)) {
+                throw new ArvadosClientException(String.format("invalid hint format: %s", hint));
+            } else if (hint.startsWith("A")) {
+                parsePermissionHint(hint);
+            } else {
+                hints.add(hint);
+            }
+        }
+    }
+
+    public List<String> getHints() {
+        return hints;
+    }
+
+    public String getMd5sum() {
+        return md5sum;
+    }
+
+    @Override
+    public String toString() {
+        return Stream.concat(Stream.of(md5sum, size.toString(), permissionHint()), hints.stream())
+                .filter(Objects::nonNull)
+                .collect(Collectors.joining("+"));
+    }
+
+    public String stripped() {
+        return size != null ? String.format("%s+%d", md5sum, size) : md5sum;
+    }
+
+    public String permissionHint() {
+        if (permSig == null || permExpiry == null) {
+            return null;
+        }
+
+        long timestamp = permExpiry.toEpochSecond(ZoneOffset.UTC);
+        String signTimestamp = Long.toHexString(timestamp);
+        return String.format("A%s@%s", permSig, signTimestamp);
+    }
+
+    private void parsePermissionHint(String hint) {
+        String[] hintSplit = hint.substring(1).split("@", 2);
+        permSig = hintSplit[0];
+
+        int permExpiryDecimal = Integer.parseInt(hintSplit[1], 16);
+        permExpiry = LocalDateTime.ofInstant(Instant.ofEpochSecond(permExpiryDecimal), ZoneOffset.UTC);
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java
new file mode 100644 (file)
index 0000000..9968ff0
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep.exception;
+
+import org.arvados.client.exception.ArvadosClientException;
+
+/**
+ * Exception indicating that a directory with the given name already exists in the specified location.
+ *
+ * <p> This exception will be thrown during an attempt to download all files from a certain
+ * collection to a location that already contains a folder named after this collection's UUID.</p>
+ */
+public class DownloadFolderAlreadyExistsException extends ArvadosClientException {
+
+    public DownloadFolderAlreadyExistsException(String message) {
+        super(message);
+    }
+
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/FileAlreadyExistsException.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/exception/FileAlreadyExistsException.java
new file mode 100644 (file)
index 0000000..ea02ffc
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep.exception;
+
+import org.arvados.client.exception.ArvadosClientException;
+
+/**
+ * Signals that an attempt to download a file with given name has failed for a specified
+ * download location.
+ *
+ * <p> This exception will be thrown during an attempt to download a single file to a location
+ * that already contains a file with the given name.</p>
+ */
+public class FileAlreadyExistsException extends ArvadosClientException {
+
+    public FileAlreadyExistsException(String message) { super(message); }
+
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/utils/FileMerge.java b/sdk/java-v2/src/main/java/org/arvados/client/utils/FileMerge.java
new file mode 100644 (file)
index 0000000..eaabbaa
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.utils;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.util.Collection;
+
+public class FileMerge {
+
+    public static void merge(Collection<File> files, File targetFile) throws IOException {
+        try (FileOutputStream fos = new FileOutputStream(targetFile); BufferedOutputStream mergingStream = new BufferedOutputStream(fos)) {
+            for (File file : files) {
+                Files.copy(file.toPath(), mergingStream);
+            }
+        }
+    }
+}
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/utils/FileSplit.java b/sdk/java-v2/src/main/java/org/arvados/client/utils/FileSplit.java
new file mode 100644 (file)
index 0000000..e118edc
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.utils;
+
+import org.apache.commons.io.FileUtils;
+
+import java.io.*;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Based on:
+ * <a href="https://stackoverflow.com/questions/10864317/how-to-break-a-file-into-pieces-using-java">How to break a file into pieces using Java</a>
+ */
+public class FileSplit {
+
+    public static List<File> split(File f, File dir, int splitSize) throws IOException {
+        int partCounter = 1;
+
+        long sizeOfFiles = splitSize * FileUtils.ONE_MB;
+        byte[] buffer = new byte[(int) sizeOfFiles];
+
+        List<File> files = new ArrayList<>();
+        String fileName = f.getName();
+
+        try (FileInputStream fis = new FileInputStream(f); BufferedInputStream bis = new BufferedInputStream(fis)) {
+            int bytesAmount = 0;
+            while ((bytesAmount = bis.read(buffer)) > 0) {
+                String filePartName = String.format("%s.%03d", fileName, partCounter++);
+                File newFile = new File(dir, filePartName);
+                try (FileOutputStream out = new FileOutputStream(newFile)) {
+                    out.write(buffer, 0, bytesAmount);
+                }
+                files.add(newFile);
+            }
+        }
+        return files;
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/resources/reference.conf b/sdk/java-v2/src/main/resources/reference.conf
new file mode 100644 (file)
index 0000000..3ff2bb0
--- /dev/null
@@ -0,0 +1,23 @@
+# Arvados client default configuration
+#
+# Remarks:
+# * When providing values, remove the quotation marks ("") from each line
+# * See Arvados documentation for information how to obtain a token:
+#   https://doc.arvados.org/user/reference/api-tokens.html
+#
+
+arvados {
+    api {
+       keepweb-host = localhost
+       keepweb-port = 8000
+       host = localhost
+       port = 8000
+       token = ""
+       protocol = https
+       host-insecure = false
+    }
+    split-size = 64
+    temp-dir = /tmp/file-split
+    copies = 2
+    retries = 0
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/BaseStandardApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/BaseStandardApiClientTest.java
new file mode 100644 (file)
index 0000000..73b559a
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.HttpUrl;
+import org.arvados.client.api.model.Item;
+import org.arvados.client.api.model.ItemList;
+import org.arvados.client.test.utils.ArvadosClientUnitTest;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Spy;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+@RunWith(MockitoJUnitRunner.class)
+public class BaseStandardApiClientTest extends ArvadosClientUnitTest {
+
+    @Spy
+    private BaseStandardApiClient<?, ?> client = new BaseStandardApiClient<Item, ItemList>(CONFIG) {
+        @Override
+        String getResource() {
+            return "resource";
+        }
+
+        @Override
+        Class<Item> getType() {
+            return null;
+        }
+
+        @Override
+        Class<ItemList> getListType() {
+            return null;
+        }
+    };
+
+    @Test
+    public void urlBuilderBuildsExpectedUrlFormat() {
+        // when
+        HttpUrl.Builder actual = client.getUrlBuilder();
+
+        // then
+        assertThat(actual.build().toString()).isEqualTo("http://localhost:9000/arvados/v1/resource");
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
new file mode 100644 (file)
index 0000000..8da3bfb
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.mockwebserver.RecordedRequest;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.test.utils.RequestMethod;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Test;
+
+import static org.arvados.client.test.utils.ApiClientTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
+
+    private static final String RESOURCE = "collections";
+
+    private CollectionsApiClient client = new CollectionsApiClient(CONFIG);
+
+    @Test
+    public void listCollections() throws Exception {
+
+        // given
+        server.enqueue(getResponse("collections-list"));
+
+        // when
+        CollectionList actual = client.list();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertThat(actual.getItemsAvailable()).isEqualTo(41);
+    }
+
+    @Test
+    public void getCollection() throws Exception {
+
+        // given
+        server.enqueue(getResponse("collections-get"));
+
+        String uuid = "112ci-4zz18-p51w7z3fpopo6sm";
+
+        // when
+        Collection actual = client.get(uuid);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + "/" + uuid);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertThat(actual.getUuid()).isEqualTo(uuid);
+        assertThat(actual.getPortableDataHash()).isEqualTo("6c4106229b08fe25f48b3a7a8289dd46+143");
+    }
+
+    @Test
+    public void createCollection() throws Exception {
+
+        // given
+        server.enqueue(getResponse("collections-create-simple"));
+
+        String name = "Super Collection";
+        
+        Collection collection = new Collection();
+        collection.setName(name);
+
+        // when
+        Collection actual = client.create(collection);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.POST);
+        assertThat(actual.getName()).isEqualTo(name);
+        assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
+        assertThat(actual.getManifestText()).isEmpty();
+    }
+
+    @Test
+    public void createCollectionWithManifest() throws Exception {
+
+        // given
+        server.enqueue(getResponse("collections-create-manifest"));
+
+        String name = "Super Collection";
+        String manifestText = ". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\n";
+        
+        Collection collection = new Collection();
+        collection.setName(name);
+        collection.setManifestText(manifestText);
+
+        // when
+        Collection actual = client.create(collection);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.POST);
+        assertThat(actual.getName()).isEqualTo(name);
+        assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
+        assertThat(actual.getManifestText()).isEqualTo(manifestText);
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/GroupsApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/GroupsApiClientTest.java
new file mode 100644 (file)
index 0000000..6bb385a
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import com.google.common.collect.Lists;
+import okhttp3.mockwebserver.RecordedRequest;
+import org.arvados.client.api.model.Group;
+import org.arvados.client.api.model.GroupList;
+import org.arvados.client.api.model.argument.Filter;
+import org.arvados.client.api.model.argument.ListArgument;
+import org.arvados.client.test.utils.RequestMethod;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.arvados.client.test.utils.ApiClientTestUtils.*;
+import static org.junit.Assert.assertEquals;
+
+public class GroupsApiClientTest extends ArvadosClientMockedWebServerTest {
+    private static final String RESOURCE = "groups";
+    private GroupsApiClient client = new GroupsApiClient(CONFIG);
+
+    @Test
+    public void listGroups() throws Exception {
+
+        // given
+        server.enqueue(getResponse("groups-list"));
+
+        // when
+        GroupList actual = client.list();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertEquals(20, actual.getItems().size());
+    }
+
+    @Test
+    public void listProjectsByOwner() throws Exception {
+
+        // given
+        server.enqueue(getResponse("groups-list"));
+        String ownerUuid = "ardev-tpzed-n3kzq4fvoks3uw4";
+        String filterSubPath = "?filters=[%20[%20%22owner_uuid%22,%20%22like%22,%20%22ardev-tpzed-n3kzq4fvoks3uw4%22%20],%20" +
+                "[%20%22group_class%22,%20%22in%22,%20[%20%22project%22,%20%22sub-project%22%20]%20]%20]";
+
+        // when
+        ListArgument listArgument = ListArgument.builder()
+                .filters(Arrays.asList(
+                        Filter.of("owner_uuid", Filter.Operator.LIKE, ownerUuid),
+                        Filter.of("group_class", Filter.Operator.IN, Lists.newArrayList("project", "sub-project")
+                        )))
+                .build();
+        GroupList actual = client.list(listArgument);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + filterSubPath);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertEquals(20, actual.getItems().size());
+    }
+
+    @Test
+    public void getGroup() throws Exception {
+
+        // given
+        server.enqueue(getResponse("groups-get"));
+
+        String uuid = "ardev-j7d0g-bmg3pfqtx3ivczp";
+
+        // when
+        Group actual = client.get(uuid);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + "/" + uuid);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertEquals(uuid, actual.getUuid());
+        assertEquals("3hw0vk4mbl0ofvia5k6x4dwrx", actual.getEtag());
+        assertEquals("ardev-tpzed-n3kzq4fvoks3uw4", actual.getOwnerUuid());
+        assertEquals("TestGroup1", actual.getName());
+        assertEquals("project", actual.getGroupClass());
+
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServerApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServerApiClientTest.java
new file mode 100644 (file)
index 0000000..50a9cc1
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import com.google.common.collect.Maps;
+import okhttp3.mockwebserver.MockResponse;
+import okhttp3.mockwebserver.RecordedRequest;
+import okio.Buffer;
+import org.apache.commons.io.FileUtils;
+import org.arvados.client.common.Headers;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Map;
+
+import static org.arvados.client.test.utils.ApiClientTestUtils.assertAuthorizationHeader;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class KeepServerApiClientTest extends ArvadosClientMockedWebServerTest {
+
+    private KeepServerApiClient client = new KeepServerApiClient(CONFIG);
+
+    @Test
+    public void uploadFileToServer() throws Exception {
+
+        // given
+        String blockLocator = "7df44272090cee6c0732382bba415ee9";
+        String signedBlockLocator = blockLocator + "+70+A189a93acda6e1fba18a9dffd42b6591cbd36d55d@5a1c17b6";
+        server.enqueue(new MockResponse().setBody(signedBlockLocator));
+
+        String url = server.url(blockLocator).toString();
+        File body = new File("README.md");
+        Map<String, String> headers = Maps.newHashMap();
+        headers.put(Headers.X_KEEP_DESIRED_REPLICAS, "2");
+
+        // when
+        String actual = client.upload(url, headers, body);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertThat(request.getPath()).isEqualTo("/" + blockLocator);
+
+        assertThat(actual).isEqualTo(signedBlockLocator);
+    }
+
+    @Test
+    public void downloadFileFromServer() throws Exception {
+        File data = new File("README.md");
+        byte[] fileBytes = FileUtils.readFileToByteArray(data);
+        server.enqueue(new MockResponse().setBody(new Buffer().write(fileBytes)));
+
+        String blockLocator = "7df44272090cee6c0732382bba415ee9";
+        String signedBlockLocator = blockLocator + "+70+A189a93acda6e1fba18a9dffd42b6591cbd36d55d@5a1c17b6";
+
+        String url = server.url(signedBlockLocator).toString();
+
+        byte[] actual = client.download(url);
+        RecordedRequest request = server.takeRequest();
+        assertThat(request.getPath()).isEqualTo("/" + signedBlockLocator);
+        assertThat(actual).isEqualTo(fileBytes);
+
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServicesApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepServicesApiClientTest.java
new file mode 100644 (file)
index 0000000..015f832
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.mockwebserver.RecordedRequest;
+import org.arvados.client.api.model.KeepService;
+import org.arvados.client.api.model.KeepServiceList;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Test;
+
+import static org.arvados.client.test.utils.ApiClientTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class KeepServicesApiClientTest extends ArvadosClientMockedWebServerTest {
+
+    private static final String RESOURCE = "keep_services";
+
+    private KeepServicesApiClient client = new KeepServicesApiClient(CONFIG);
+
+    @Test
+    public void listKeepServices() throws Exception {
+
+        // given
+        server.enqueue(getResponse("keep-services-list"));
+
+        // when
+        KeepServiceList actual = client.list();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+
+        assertThat(actual.getItemsAvailable()).isEqualTo(3);
+
+    }
+
+    @Test
+    public void listAccessibleKeepServices() throws Exception {
+
+        // given
+        server.enqueue(getResponse("keep-services-accessible"));
+
+        // when
+        KeepServiceList actual = client.accessible();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + "/accessible");
+        assertThat(actual.getItemsAvailable()).isEqualTo(2);
+    }
+
+    @Test
+    public void getKeepService() throws Exception {
+
+        // given
+        server.enqueue(getResponse("keep-services-get"));
+
+        String uuid = "112ci-bi6l4-hv02fg8sbti8ykk";
+
+        // when
+        KeepService actual = client.get(uuid);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + "/" + uuid);
+        assertThat(actual.getUuid()).isEqualTo(uuid);
+        assertThat(actual.getServiceType()).isEqualTo("disk");
+    }
+
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/UsersApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/UsersApiClientTest.java
new file mode 100644 (file)
index 0000000..40f7bac
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.mockwebserver.RecordedRequest;
+import org.arvados.client.api.model.User;
+import org.arvados.client.api.model.UserList;
+import org.arvados.client.test.utils.RequestMethod;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Test;
+
+import static org.arvados.client.common.Characters.SLASH;
+import static org.arvados.client.test.utils.ApiClientTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class UsersApiClientTest extends ArvadosClientMockedWebServerTest {
+
+    private static final String RESOURCE = "users";
+    private static final String USER_UUID = "ardev-tpzed-q6dvn7sby55up1b";
+
+    private UsersApiClient client = new UsersApiClient(CONFIG);
+
+    @Test
+    public void listUsers() throws Exception {
+
+        // given
+        server.enqueue(getResponse("users-list"));
+
+        // when
+        UserList actual = client.list();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertThat(actual.getItemsAvailable()).isEqualTo(13);
+    }
+
+    @Test
+    public void getUser() throws Exception {
+
+        // given
+        server.enqueue(getResponse("users-get"));
+
+        // when
+        User actual = client.get(USER_UUID);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + SLASH + USER_UUID);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertThat(actual.getUuid()).isEqualTo(USER_UUID);
+    }
+
+    @Test
+    public void getCurrentUser() throws Exception {
+
+        // given
+        server.enqueue(getResponse("users-get"));
+
+        // when
+        User actual = client.current();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + SLASH + "current");
+        assertRequestMethod(request, RequestMethod.GET);
+        assertThat(actual.getUuid()).isEqualTo(USER_UUID);
+    }
+
+    @Test
+    public void getSystemUser() throws Exception {
+
+        // given
+        server.enqueue(getResponse("users-system"));
+
+        // when
+        User actual = client.system();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + SLASH + "system");
+        assertRequestMethod(request, RequestMethod.GET);
+        assertThat(actual.getUuid()).isEqualTo("ardev-tpzed-000000000000000");
+    }
+
+    @Test
+    public void createUser() throws Exception {
+
+        // given
+        server.enqueue(getResponse("users-create"));
+
+        String firstName = "John";
+        String lastName = "Wayne";
+        String fullName = String.format("%s %s", firstName, lastName);
+        String username = String.format("%s%s", firstName, lastName).toLowerCase();
+
+        User user = new User();
+        user.setFirstName(firstName);
+        user.setLastName(lastName);
+        user.setFullName(fullName);
+        user.setUsername(username);
+
+        // when
+        User actual = client.create(user);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.POST);
+        assertThat(actual.getFirstName()).isEqualTo(firstName);
+        assertThat(actual.getLastName()).isEqualTo(lastName);
+        assertThat(actual.getFullName()).isEqualTo(fullName);
+        assertThat(actual.getUsername()).isEqualTo(username);
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/factory/OkHttpClientFactoryTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/factory/OkHttpClientFactoryTest.java
new file mode 100644 (file)
index 0000000..f7e1813
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client.factory;
+
+import okhttp3.OkHttpClient;
+import okhttp3.Request;
+import okhttp3.Response;
+import okhttp3.mockwebserver.MockResponse;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSocketFactory;
+import javax.net.ssl.TrustManagerFactory;
+import java.io.FileInputStream;
+import java.security.KeyStore;
+
+
+@RunWith(MockitoJUnitRunner.class)
+public class OkHttpClientFactoryTest extends ArvadosClientMockedWebServerTest {
+
+    @Test(expected = javax.net.ssl.SSLHandshakeException.class)
+    public void secureOkHttpClientIsCreated() throws Exception {
+
+        // given
+        OkHttpClientFactory factory = OkHttpClientFactory.builder().build();
+        // * configure HTTPS server
+        SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();
+        server.useHttps(sf, false);
+        server.enqueue(new MockResponse().setBody("OK"));
+        // * prepare client HTTP request
+        Request request = new Request.Builder()
+                .url("https://localhost:9000/")
+                .build();
+
+        // when - then (SSL certificate is verified)
+        OkHttpClient actual = factory.create(false);
+        Response response = actual.newCall(request).execute();
+    }
+
+    @Test
+    public void insecureOkHttpClientIsCreated() throws Exception {
+        // given
+        OkHttpClientFactory factory = OkHttpClientFactory.builder().build();
+        // * configure HTTPS server
+        SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();
+        server.useHttps(sf, false);
+        server.enqueue(new MockResponse().setBody("OK"));
+        // * prepare client HTTP request
+        Request request = new Request.Builder()
+                .url("https://localhost:9000/")
+                .build();
+
+        // when (SSL certificate is not verified)
+        OkHttpClient actual = factory.create(true);
+        Response response = actual.newCall(request).execute();
+
+        // then
+        Assert.assertEquals(response.body().string(),"OK");
+    }
+
+
+    /*
+        This ugly boilerplate is needed to enable a self-signed certificate.
+
+        It requires the selfsigned.keystore.jks file, which was generated with:
+        keytool -genkey -v -keystore mystore.keystore.jks -alias alias_name -keyalg RSA -keysize 2048 -validity 10000
+     */
+    public SSLSocketFactory getSSLSocketFactoryWithSelfSignedCertificate() throws Exception {
+
+        FileInputStream stream = new FileInputStream("src/test/resources/selfsigned.keystore.jks");
+        char[] serverKeyStorePassword = "123456".toCharArray();
+        KeyStore serverKeyStore = KeyStore.getInstance(KeyStore.getDefaultType());
+        serverKeyStore.load(stream, serverKeyStorePassword);
+
+        String kmfAlgorithm = KeyManagerFactory.getDefaultAlgorithm();
+        KeyManagerFactory kmf = KeyManagerFactory.getInstance(kmfAlgorithm);
+        kmf.init(serverKeyStore, serverKeyStorePassword);
+
+        TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(kmfAlgorithm);
+        trustManagerFactory.init(serverKeyStore);
+
+        SSLContext sslContext = SSLContext.getInstance("SSL");
+        sslContext.init(kmf.getKeyManagers(), trustManagerFactory.getTrustManagers(), null);
+        return sslContext.getSocketFactory();
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java b/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java
new file mode 100644 (file)
index 0000000..07269f7
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.facade;
+
+import org.apache.commons.io.FileUtils;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.common.Characters;
+import org.arvados.client.config.ExternalConfigProvider;
+import org.arvados.client.junit.categories.IntegrationTests;
+import org.arvados.client.logic.collection.FileToken;
+import org.arvados.client.test.utils.ArvadosClientIntegrationTest;
+import org.arvados.client.test.utils.FileTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+
+import static org.arvados.client.test.utils.FileTestUtils.FILE_DOWNLOAD_TEST_DIR;
+import static org.arvados.client.test.utils.FileTestUtils.FILE_SPLIT_TEST_DIR;
+import static org.arvados.client.test.utils.FileTestUtils.TEST_FILE;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+@Category(IntegrationTests.class)
+public class ArvadosFacadeIntegrationTest extends ArvadosClientIntegrationTest {
+
+
+    private static final String COLLECTION_NAME = "Test collection " + UUID.randomUUID().toString();
+    private String collectionUuid;
+
+    @Before
+    public void setUp() throws Exception {
+        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);
+        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);
+    }
+
+    @Test
+    public void uploadOfFileIsPerformedSuccessfully() throws Exception {
+        // given
+        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB / 200);
+
+        // when
+        Collection actual = FACADE.upload(Collections.singletonList(file), COLLECTION_NAME, PROJECT_UUID);
+        collectionUuid = actual.getUuid();
+
+        // then
+        assertThat(actual.getName()).contains("Test collection");
+        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());
+    }
+
+    @Test
+    public void uploadOfFilesIsPerformedSuccessfully() throws Exception {
+        // given
+        List<File> files = FileTestUtils.generatePredefinedFiles();
+        files.addAll(FileTestUtils.generatePredefinedFiles());
+
+        // when
+        Collection actual = FACADE.upload(files, COLLECTION_NAME, PROJECT_UUID);
+        collectionUuid = actual.getUuid();
+
+        // then
+        assertThat(actual.getName()).contains("Test collection");
+        files.forEach(f -> assertThat(actual.getManifestText()).contains(f.length() + Characters.COLON + f.getName().replace(" ", Characters.SPACE)));
+    }
+
+    @Test
+    public void uploadToExistingCollectionIsPerformedSuccessfully() throws Exception {
+        // given
+        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB / 500);
+        Collection existing = createTestCollection();
+
+        // when
+        Collection actual = FACADE.uploadToExistingCollection(Collections.singletonList(file), collectionUuid);
+
+        // then
+        assertEquals(collectionUuid, actual.getUuid());
+        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());
+    }
+
+    @Test
+    public void uploadWithExternalConfigProviderWorksProperly() throws Exception {
+        //given
+        ArvadosFacade facade = new ArvadosFacade(buildExternalConfig());
+        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB / 200);
+
+        //when
+        Collection actual = facade.upload(Collections.singletonList(file), COLLECTION_NAME, PROJECT_UUID);
+        collectionUuid = actual.getUuid();
+
+        //then
+        assertThat(actual.getName()).contains("Test collection");
+        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());
+    }
+
+    @Test
+    public void creationOfEmptyCollectionPerformedSuccesfully() {
+        // given
+        String collectionName = "Empty collection " + UUID.randomUUID().toString();
+
+        // when
+        Collection actual = FACADE.createEmptyCollection(collectionName, PROJECT_UUID);
+        collectionUuid = actual.getUuid();
+
+        // then
+        assertEquals(collectionName, actual.getName());
+        assertEquals(PROJECT_UUID, actual.getOwnerUuid());
+    }
+
+    @Test
+    public void fileTokensAreListedFromCollection() throws Exception {
+        //given
+        List<File> files = uploadTestFiles();
+
+        //when
+        List<FileToken> actual = FACADE.listFileInfoFromCollection(collectionUuid);
+
+        //then
+        assertEquals(files.size(), actual.size());
+        for (int i = 0; i < files.size(); i++) {
+            assertEquals(files.get(i).length(), actual.get(i).getFileSize());
+        }
+    }
+
+    @Test
+    public void downloadOfFilesPerformedSuccessfully() throws Exception {
+        //given
+        List<File> files = uploadTestFiles();
+        File destination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);
+
+        //when
+        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);
+
+        //then
+        assertEquals(files.size(), actual.size());
+        assertTrue(destination.exists());
+        assertThat(actual).allMatch(File::exists);
+        for (int i = 0; i < files.size(); i++) {
+            assertEquals(files.get(i).length(), actual.get(i).length());
+        }
+    }
+
+    @Test
+    public void downloadOfFilesPerformedSuccessfullyUsingKeepWeb() throws Exception {
+        //given
+        List<File> files = uploadTestFiles();
+        File destination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);
+
+        //when
+        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, true);
+
+        //then
+        assertEquals(files.size(), actual.size());
+        assertTrue(destination.exists());
+        assertThat(actual).allMatch(File::exists);
+        for (int i = 0; i < files.size(); i++) {
+            assertEquals(files.get(i).length(), actual.get(i).length());
+        }
+    }
+
+    @Test
+    public void singleFileIsDownloadedSuccessfullyUsingKeepWeb() throws Exception {
+        //given
+        File file = uploadSingleTestFile(false);
+
+        //when
+        File actual = FACADE.downloadFile(file.getName(), collectionUuid, FILE_DOWNLOAD_TEST_DIR);
+
+        //then
+        assertThat(actual).exists();
+        assertThat(actual.length()).isEqualTo(file.length());
+    }
+
+    @Test
+    public void downloadOfOneFileSplittedToMultipleLocatorsPerformedSuccesfully() throws Exception {
+        //given
+        File file = uploadSingleTestFile(true);
+
+        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);
+
+        Assert.assertEquals(1, actual.size());
+        assertThat(actual.get(0).length()).isEqualTo(file.length());
+    }
+
+    @Test
+    public void downloadWithExternalConfigProviderWorksProperly() throws Exception {
+        //given
+        ArvadosFacade facade = new ArvadosFacade(buildExternalConfig());
+        List<File> files = uploadTestFiles();
+        //when
+        List<File> actual = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);
+
+        //then
+        assertEquals(files.size(), actual.size());
+        assertThat(actual).allMatch(File::exists);
+        for (int i = 0; i < files.size(); i++) {
+            assertEquals(files.get(i).length(), actual.get(i).length());
+        }
+    }
+
+    private ExternalConfigProvider buildExternalConfig() {
+        return ExternalConfigProvider
+                .builder()
+                .apiHostInsecure(CONFIG.isApiHostInsecure())
+                .keepWebHost(CONFIG.getKeepWebHost())
+                .keepWebPort(CONFIG.getKeepWebPort())
+                .apiHost(CONFIG.getApiHost())
+                .apiPort(CONFIG.getApiPort())
+                .apiToken(CONFIG.getApiToken())
+                .apiProtocol(CONFIG.getApiProtocol())
+                .fileSplitSize(CONFIG.getFileSplitSize())
+                .fileSplitDirectory(CONFIG.getFileSplitDirectory())
+                .numberOfCopies(CONFIG.getNumberOfCopies())
+                .numberOfRetries(CONFIG.getNumberOfRetries())
+                .build();
+    }
+
+    private Collection createTestCollection() {
+        Collection collection = FACADE.createEmptyCollection(COLLECTION_NAME, PROJECT_UUID);
+        collectionUuid = collection.getUuid();
+        return collection;
+    }
+
+    private List<File> uploadTestFiles() throws Exception{
+        createTestCollection();
+        List<File> files = FileTestUtils.generatePredefinedFiles();
+        FACADE.uploadToExistingCollection(files, collectionUuid);
+        return files;
+    }
+
+    private File uploadSingleTestFile(boolean bigFile) throws Exception{
+        createTestCollection();
+        Long fileSize = bigFile ? FileUtils.ONE_MB * 70 : FileTestUtils.ONE_EIGTH_GB / 100;
+        File file = FileTestUtils.generateFile(TEST_FILE, fileSize);
+        FACADE.uploadToExistingCollection(Collections.singletonList(file), collectionUuid);
+        return file;
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);
+        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);
+
+        if(collectionUuid != null)
+        FACADE.deleteCollection(collectionUuid);
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeTest.java b/sdk/java-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeTest.java
new file mode 100644 (file)
index 0000000..67eebaa
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.facade;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import okhttp3.mockwebserver.MockResponse;
+import okio.Buffer;
+import org.apache.commons.io.FileUtils;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.api.model.KeepService;
+import org.arvados.client.api.model.KeepServiceList;
+import org.arvados.client.common.Characters;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.arvados.client.test.utils.FileTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.Ignore;
+
+import java.io.File;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
+import static org.arvados.client.test.utils.FileTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ArvadosFacadeTest extends ArvadosClientMockedWebServerTest {
+
+    ArvadosFacade facade = new ArvadosFacade(CONFIG);
+
+    @Before
+    public void setUp() throws Exception {
+        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);
+        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);
+    }
+
+    @Test
+    @Ignore("Failing test #15041")
+    public void uploadIsPerformedSuccessfullyUsingDiskOnlyKeepServices() throws Exception {
+
+        // given
+        String keepServicesAccessible = setMockedServerPortToKeepServices("keep-services-accessible-disk-only");
+        server.enqueue(new MockResponse().setBody(keepServicesAccessible));
+
+        String blockLocator = "7df44272090cee6c0732382bba415ee9";
+        String signedBlockLocator = blockLocator + "+70+A189a93acda6e1fba18a9dffd42b6591cbd36d55d@5a1c17b6";
+        for (int i = 0; i < 8; i++) {
+            server.enqueue(new MockResponse().setBody(signedBlockLocator));
+        }
+        server.enqueue(getResponse("users-get"));
+        server.enqueue(getResponse("collections-create-manifest"));
+
+        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB);
+
+        // when
+        Collection actual = facade.upload(Arrays.asList(new File(TEST_FILE)), "Super Collection", null);
+
+        // then
+        assertThat(actual.getName()).contains("Super Collection");
+    }
+
+    @Test
+    public void uploadIsPerformedSuccessfully() throws Exception {
+
+        // given
+        String keepServicesAccessible = setMockedServerPortToKeepServices("keep-services-accessible");
+        server.enqueue(new MockResponse().setBody(keepServicesAccessible));
+
+        String blockLocator = "7df44272090cee6c0732382bba415ee9";
+        String signedBlockLocator = blockLocator + "+70+A189a93acda6e1fba18a9dffd42b6591cbd36d55d@5a1c17b6";
+        for (int i = 0; i < 4; i++) {
+            server.enqueue(new MockResponse().setBody(signedBlockLocator));
+        }
+        server.enqueue(getResponse("users-get"));
+        server.enqueue(getResponse("collections-create-manifest"));
+
+        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB);
+
+        // when
+        Collection actual = facade.upload(Arrays.asList(new File(TEST_FILE)), "Super Collection", null);
+
+        // then
+        assertThat(actual.getName()).contains("Super Collection");
+    }
+
+    @Test
+    public void downloadOfWholeCollectionIsPerformedSuccessfully() throws Exception {
+
+        //given
+        String collectionUuid = "ardev-4zz18-jk5vo4uo9u5vj52";
+        server.enqueue(getResponse("collections-download-file"));
+
+        String keepServicesAccessible = setMockedServerPortToKeepServices("keep-services-accessible");
+        server.enqueue(new MockResponse().setBody(keepServicesAccessible));
+        File collectionDestination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);
+
+        List<File> files = generatePredefinedFiles();
+        List<byte[]> fileData = new ArrayList<>();
+        for (File f : files) {
+            fileData.add(Files.readAllBytes(f.toPath()));
+        }
+        byte[] filesDataChunk = fileData.stream().reduce(new byte[0], this::addAll);
+
+        server.enqueue(new MockResponse().setBody(new Buffer().write(filesDataChunk)));
+
+        //when
+        List<File> downloadedFiles = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);
+
+        //then
+        assertEquals(3, downloadedFiles.size());
+        assertTrue(collectionDestination.exists());
+        assertThat(downloadedFiles).allMatch(File::exists);
+        assertEquals(files.stream().map(File::getName).collect(Collectors.toList()), downloadedFiles.stream().map(File::getName).collect(Collectors.toList()));
+        assertEquals(files.stream().map(File::length).collect(Collectors.toList()), downloadedFiles.stream().map(File::length).collect(Collectors.toList()));
+    }
+
+    @Test
+    public void downloadOfWholeCollectionUsingKeepWebPerformedSuccessfully() throws Exception {
+
+        //given
+        String collectionUuid = "ardev-4zz18-jk5vo4uo9u5vj52";
+        server.enqueue(getResponse("collections-download-file"));
+
+        List<File> files = generatePredefinedFiles();
+        for (File f : files) {
+            server.enqueue(new MockResponse().setBody(new Buffer().write(FileUtils.readFileToByteArray(f))));
+        }
+
+        //when
+        List<File> downloadedFiles = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, true);
+
+        //then
+        assertEquals(3, downloadedFiles.size());
+        assertThat(downloadedFiles).allMatch(File::exists);
+        assertEquals(files.stream().map(File::getName).collect(Collectors.toList()), downloadedFiles.stream().map(File::getName).collect(Collectors.toList()));
+        assertTrue(downloadedFiles.stream().map(File::length).collect(Collectors.toList()).containsAll(files.stream().map(File::length).collect(Collectors.toList())));
+    }
+
+    @Test
+    public void downloadOfSingleFilePerformedSuccessfully() throws Exception {
+
+        //given
+        String collectionUuid = "ardev-4zz18-jk5vo4uo9u5vj52";
+        server.enqueue(getResponse("collections-download-file"));
+
+        File file = generatePredefinedFiles().get(0);
+        byte[] fileData = FileUtils.readFileToByteArray(file);
+        server.enqueue(new MockResponse().setBody(new Buffer().write(fileData)));
+
+        //when
+        File downloadedFile = facade.downloadFile(file.getName(), collectionUuid, FILE_DOWNLOAD_TEST_DIR);
+
+        //then
+        assertTrue(downloadedFile.exists());
+        assertEquals(file.getName(), downloadedFile.getName());
+        assertEquals(file.length(), downloadedFile.length());
+    }
+
+    private String setMockedServerPortToKeepServices(String jsonPath) throws Exception {
+
+        ObjectMapper mapper = new ObjectMapper().findAndRegisterModules();
+        String filePath = String.format("src/test/resources/org/arvados/client/api/client/%s.json", jsonPath);
+        File jsonFile = new File(filePath);
+        String json = FileUtils.readFileToString(jsonFile, Charset.defaultCharset());
+        KeepServiceList keepServiceList = mapper.readValue(json, KeepServiceList.class);
+        List<KeepService> items = keepServiceList.getItems();
+        for (KeepService keepService : items) {
+            keepService.setServicePort(server.getPort());
+        }
+        ObjectWriter writer = mapper.writer().withDefaultPrettyPrinter();
+        return writer.writeValueAsString(keepServiceList);
+    }
+
+    //Method to copy multiple byte[] arrays into one byte[] array
+    private byte[] addAll(byte[] array1, byte[] array2) {
+        byte[] joinedArray = new byte[array1.length + array2.length];
+        System.arraycopy(array1, 0, joinedArray, 0, array1.length);
+        System.arraycopy(array2, 0, joinedArray, array1.length, array2.length);
+        return joinedArray;
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);
+        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/junit/categories/IntegrationTests.java b/sdk/java-v2/src/test/java/org/arvados/client/junit/categories/IntegrationTests.java
new file mode 100644 (file)
index 0000000..6a0e78d
--- /dev/null
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.junit.categories;
+
/**
 * Marker interface for JUnit's {@code @Category} annotation, used to tag
 * integration tests so they can be included or excluded at run time.
 */
public interface IntegrationTests {}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/FileTokenTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/FileTokenTest.java
new file mode 100644 (file)
index 0000000..1393985
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import org.arvados.client.common.Characters;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class FileTokenTest {
+
+    public static final String FILE_TOKEN_INFO = "0:1024:test-file1";
+    public static final int FILE_POSITION = 0;
+    public static final int FILE_LENGTH = 1024;
+    public static final String FILE_NAME = "test-file1";
+    public static final String FILE_PATH = "c" + Characters.SLASH;
+
+    private static FileToken fileToken = new FileToken(FILE_TOKEN_INFO);
+    private static FileToken fileTokenWithPath = new FileToken(FILE_TOKEN_INFO, FILE_PATH);
+
+    @Test
+    public void tokenInfoIsDividedCorrectly(){
+        Assert.assertEquals(FILE_NAME, fileToken.getFileName());
+        Assert.assertEquals(FILE_POSITION, fileToken.getFilePosition());
+        Assert.assertEquals(FILE_LENGTH, fileToken.getFileSize());
+    }
+
+    @Test
+    public void toStringReturnsOriginalFileTokenInfo(){
+        Assert.assertEquals(FILE_TOKEN_INFO, fileToken.toString());
+    }
+
+    @Test
+    public void fullPathIsReturnedProperly(){
+        Assert.assertEquals(FILE_NAME, fileToken.getFullPath());
+        Assert.assertEquals(FILE_PATH + FILE_NAME, fileTokenWithPath.getFullPath());
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestDecoderTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestDecoderTest.java
new file mode 100644 (file)
index 0000000..c9464e0
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import org.arvados.client.exception.ArvadosClientException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+
+import static junit.framework.TestCase.fail;
+
+public class ManifestDecoderTest {
+
+    private ManifestDecoder manifestDecoder = new ManifestDecoder();
+
+    private static final String ONE_LINE_MANIFEST_TEXT = ". " +
+            "eff999f3b5158331eb44a9a93e3b36e1+67108864+Aad3839bea88bce22cbfe71cf4943de7dab3ea52a@5826180f " +
+            "db141bfd11f7da60dce9e5ee85a988b8+34038725+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f " +
+            "0:101147589:rna.SRR948778.bam" +
+            "\\n";
+
+    private static final String MULTIPLE_LINES_MANIFEST_TEXT  = ". " +
+            "930625b054ce894ac40596c3f5a0d947+33 " +
+            "0:0:a 0:0:b 0:33:output.txt\n" +
+            "./c d41d8cd98f00b204e9800998ecf8427e+0 0:0:d";
+
+    private static final String MANIFEST_TEXT_WITH_INVALID_FIRST_PATH_COMPONENT = "a" + ONE_LINE_MANIFEST_TEXT;
+
+
+    @Test
+    public void allLocatorsAndFileTokensAreExtractedFromSimpleManifest() {
+
+        List<ManifestStream> actual = manifestDecoder.decode(ONE_LINE_MANIFEST_TEXT);
+
+        // one manifest stream
+        Assert.assertEquals(1, actual.size());
+
+        ManifestStream manifest = actual.get(0);
+        // two locators
+        Assert.assertEquals(2, manifest.getKeepLocators().size());
+        // one file token
+        Assert.assertEquals(1, manifest.getFileTokens().size());
+
+    }
+
+    @Test
+    public void allLocatorsAndFileTokensAreExtractedFromComplexManifest() {
+
+        List<ManifestStream> actual = manifestDecoder.decode(MULTIPLE_LINES_MANIFEST_TEXT);
+
+        // two manifest streams
+        Assert.assertEquals(2, actual.size());
+
+        // first stream - 1 locator and 3 file tokens
+        ManifestStream firstManifestStream = actual.get(0);
+        Assert.assertEquals(1, firstManifestStream.getKeepLocators().size());
+        Assert.assertEquals(3, firstManifestStream.getFileTokens().size());
+
+        // second stream - 1 locator and 1 file token
+        ManifestStream secondManifestStream = actual.get(1);
+        Assert.assertEquals(1, secondManifestStream.getKeepLocators().size());
+        Assert.assertEquals(1, secondManifestStream.getFileTokens().size());
+    }
+
+    @Test
+    public void manifestTextWithInvalidStreamNameThrowsException() {
+
+        try {
+            List<ManifestStream> actual = manifestDecoder.decode(MANIFEST_TEXT_WITH_INVALID_FIRST_PATH_COMPONENT);
+            fail();
+        } catch (ArvadosClientException e) {
+            Assert.assertEquals("Invalid first path component (expecting \".\")", e.getMessage());
+        }
+
+    }
+
+    @Test
+    public void emptyManifestTextThrowsException() {
+        String emptyManifestText = null;
+
+        try {
+            List<ManifestStream> actual = manifestDecoder.decode(emptyManifestText);
+            fail();
+        } catch (ArvadosClientException e) {
+            Assert.assertEquals("Manifest text cannot be empty.", e.getMessage());
+        }
+
+        emptyManifestText = "";
+        try {
+            List<ManifestStream> actual = manifestDecoder.decode(emptyManifestText);
+            fail();
+        } catch (ArvadosClientException e) {
+            Assert.assertEquals("Manifest text cannot be empty.", e.getMessage());
+        }
+
+    }
+
+
+
+
+
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestFactoryTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestFactoryTest.java
new file mode 100644 (file)
index 0000000..a073598
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+import org.arvados.client.test.utils.FileTestUtils;
+import org.assertj.core.util.Lists;
+import org.junit.Test;
+import org.junit.Ignore;
+
+import java.io.File;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ManifestFactoryTest {
+
+    @Test
+    @Ignore("Failing test #15041")
+    public void manifestIsCreatedAsExpected() throws Exception {
+
+        // given
+        List<File> files = FileTestUtils.generatePredefinedFiles();
+        List<String> locators = Lists.newArrayList("a", "b", "c");
+        ManifestFactory factory = ManifestFactory.builder()
+                .files(files)
+                .locators(locators)
+                .build();
+
+        // when
+        String actual = factory.create();
+
+        // then
+        assertThat(actual).isEqualTo(". a b c 0:1024:test-file1 1024:20480:test-file2 21504:1048576:test-file\\0403\n");
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestStreamTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/collection/ManifestStreamTest.java
new file mode 100644 (file)
index 0000000..bc36889
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.collection;
+
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.List;
+
+public class ManifestStreamTest {
+
+    private ManifestDecoder manifestDecoder = new ManifestDecoder();
+
+    @Test
+    public void toStringReturnsProperlyConnectedManifestStream() throws Exception{
+        String encodedManifest = ". eff999f3b5158331eb44a9a93e3b36e1+67108864 db141bfd11f7da60dce9e5ee85a988b8+34038725 0:101147589:rna.SRR948778.bam\\n\"";
+        List<ManifestStream> manifestStreams = manifestDecoder.decode(encodedManifest);
+        Assert.assertEquals(encodedManifest, manifestStreams.get(0).toString());
+
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
new file mode 100644 (file)
index 0000000..0fb1f02
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.arvados.client.api.client.CollectionsApiClient;
+import org.arvados.client.api.client.KeepWebApiClient;
+import org.arvados.client.api.model.Collection;
+import org.arvados.client.common.Characters;
+import org.arvados.client.logic.collection.FileToken;
+import org.arvados.client.logic.collection.ManifestDecoder;
+import org.arvados.client.logic.collection.ManifestStream;
+import org.arvados.client.test.utils.FileTestUtils;
+import org.arvados.client.utils.FileMerge;
+import org.apache.commons.io.FileUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+
+import static org.arvados.client.test.utils.FileTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.when;
+
+@RunWith(MockitoJUnitRunner.class)
+public class FileDownloaderTest {
+
+    static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules();
+    private Collection collectionToDownload;
+    private ManifestStream manifestStream;
+
+    @Mock
+    private CollectionsApiClient collectionsApiClient;
+    @Mock
+    private KeepClient keepClient;
+    @Mock
+    private KeepWebApiClient keepWebApiClient;
+    @Mock
+    private ManifestDecoder manifestDecoder;
+    @InjectMocks
+    private FileDownloader fileDownloader;
+
+    @Before
+    public void setUp() throws Exception {
+        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);
+        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);
+
+        collectionToDownload = prepareCollection();
+        manifestStream = prepareManifestStream();
+    }
+
+    @Test
+    public void downloadingAllFilesFromCollectionWorksProperly() throws Exception {
+        // given
+        List<File> files = generatePredefinedFiles();
+        byte[] dataChunk = prepareDataChunk(files);
+
+        //having
+        when(collectionsApiClient.get(collectionToDownload.getUuid())).thenReturn(collectionToDownload);
+        when(manifestDecoder.decode(collectionToDownload.getManifestText())).thenReturn(Arrays.asList(manifestStream));
+        when(keepClient.getDataChunk(manifestStream.getKeepLocators().get(0))).thenReturn(dataChunk);
+
+        //when
+        List<File> downloadedFiles = fileDownloader.downloadFilesFromCollection(collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
+
+        //then
+        Assert.assertEquals(3, downloadedFiles.size()); // 3 files downloaded
+
+        File collectionDir = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionToDownload.getUuid());
+        Assert.assertTrue(collectionDir.exists()); // collection directory created
+
+        // 3 files correctly saved
+        assertThat(downloadedFiles).allMatch(File::exists);
+
+        for(int i = 0; i < downloadedFiles.size(); i ++) {
+            File downloaded = new File(collectionDir + Characters.SLASH + files.get(i).getName());
+            Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloaded), FileUtils.readFileToByteArray(files.get(i)));
+        }
+    }
+
+    @Test
+    public void downloadingSingleFileFromKeepWebWorksCorrectly() throws Exception{
+        //given
+        File file = generatePredefinedFiles().get(0);
+
+        //having
+        when(collectionsApiClient.get(collectionToDownload.getUuid())).thenReturn(collectionToDownload);
+        when(manifestDecoder.decode(collectionToDownload.getManifestText())).thenReturn(Arrays.asList(manifestStream));
+        when(keepWebApiClient.download(collectionToDownload.getUuid(), file.getName())).thenReturn(FileUtils.readFileToByteArray(file));
+
+        //when
+        File downloadedFile = fileDownloader.downloadSingleFileUsingKeepWeb(file.getName(), collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
+
+        //then
+        Assert.assertTrue(downloadedFile.exists());
+        Assert.assertEquals(file.getName(), downloadedFile.getName());
+        Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);
+        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);
+    }
+
+    private Collection prepareCollection() throws IOException {
+        // collection that will be returned by mocked collectionsApiClient
+        String filePath = "src/test/resources/org/arvados/client/api/client/collections-download-file.json";
+        File jsonFile = new File(filePath);
+        return MAPPER.readValue(jsonFile, Collection.class);
+    }
+
+    private ManifestStream prepareManifestStream() throws Exception {
+        // manifestStream that will be returned by mocked manifestDecoder
+        List<FileToken> fileTokens = new ArrayList<>();
+        fileTokens.add(new FileToken("0:1024:test-file1"));
+        fileTokens.add(new FileToken("1024:20480:test-file2"));
+        fileTokens.add(new FileToken("21504:1048576:test-file\\0403"));
+
+        KeepLocator keepLocator = new KeepLocator("163679d58edaadc28db769011728a72c+1070080+A3acf8c1fe582c265d2077702e4a7d74fcc03aba8@5aa4fdeb");
+        return new ManifestStream(".", Arrays.asList(keepLocator), fileTokens);
+    }
+
+    private byte[] prepareDataChunk(List<File> files) throws IOException {
+        File combinedFile = new File(FILE_SPLIT_TEST_DIR + Characters.SLASH + UUID.randomUUID());
+        FileMerge.merge(files, combinedFile);
+        return FileUtils.readFileToByteArray(combinedFile);
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepClientTest.java
new file mode 100644 (file)
index 0000000..e4e7bf2
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import okhttp3.mockwebserver.MockResponse;
+import okio.Buffer;
+import org.apache.commons.io.FileUtils;
+import org.arvados.client.config.FileConfigProvider;
+import org.arvados.client.config.ConfigProvider;
+import org.arvados.client.exception.ArvadosClientException;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.io.File;
+
+import static junit.framework.TestCase.fail;
+import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
+import static org.assertj.core.api.Assertions.assertThat;
+
+@RunWith(MockitoJUnitRunner.class)
+public class KeepClientTest extends ArvadosClientMockedWebServerTest {
+
+    private ConfigProvider configProvider = new FileConfigProvider();
+    private static final String TEST_FILE_PATH ="src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt";
+
+    @InjectMocks
+    private KeepClient keepClient  = new KeepClient(configProvider);
+
+    @Mock
+    private KeepLocator keepLocator;
+
+    @Test
+    public void uploadedFile() throws Exception {
+        // given
+        server.enqueue(getResponse("keep-services-accessible"));
+        server.enqueue(new MockResponse().setBody("0887c78c7d6c1a60ac0b3709a4302ee4"));
+
+        // when
+        String actual = keepClient.put(new File(TEST_FILE_PATH), 1, 0);
+
+        // then
+        assertThat(actual).isEqualTo("0887c78c7d6c1a60ac0b3709a4302ee4");
+    }
+
+    @Test
+    public void fileIsDownloaded() throws Exception {
+        //given
+        File data = new File(TEST_FILE_PATH);
+        byte[] fileBytes = FileUtils.readFileToByteArray(data);
+
+        // when
+        server.enqueue(getResponse("keep-services-accessible"));
+        server.enqueue(new MockResponse().setBody(new Buffer().write(fileBytes)));
+
+        byte[] actual = keepClient.getDataChunk(keepLocator);
+
+        Assert.assertArrayEquals(fileBytes, actual);
+    }
+
+    @Test
+    public void fileIsDownloadedWhenFirstServerDoesNotRespond() throws Exception {
+        // given
+        File data = new File(TEST_FILE_PATH);
+        byte[] fileBytes = FileUtils.readFileToByteArray(data);
+        server.enqueue(getResponse("keep-services-accessible")); // two servers accessible
+        server.enqueue(new MockResponse().setResponseCode(404)); // first one not responding
+        server.enqueue(new MockResponse().setBody(new Buffer().write(fileBytes))); // second one responding
+
+        //when
+        byte[] actual = keepClient.getDataChunk(keepLocator);
+
+        //then
+        Assert.assertArrayEquals(fileBytes, actual);
+    }
+
+    @Test
+    public void exceptionIsThrownWhenNoServerResponds() throws Exception {
+        //given
+        File data = new File(TEST_FILE_PATH);
+        server.enqueue(getResponse("keep-services-accessible")); // two servers accessible
+        server.enqueue(new MockResponse().setResponseCode(404)); // first one not responding
+        server.enqueue(new MockResponse().setResponseCode(404)); // second one not responding
+
+        try {
+            //when
+            keepClient.getDataChunk(keepLocator);
+            fail();
+        } catch (ArvadosClientException e) {
+            //then
+            Assert.assertEquals("No server responding. Unable to download data chunk.", e.getMessage());
+        }
+    }
+
+    @Test
+    public void exceptionIsThrownWhenThereAreNoServersAccessible() throws Exception {
+        //given
+        server.enqueue(getResponse("keep-services-not-accessible")); // no servers accessible
+
+        try {
+            //when
+            keepClient.getDataChunk(keepLocator);
+            fail();
+        } catch (ArvadosClientException e) {
+            //then
+            Assert.assertEquals("No gateway services available!", e.getMessage());
+        }
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepLocatorTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/KeepLocatorTest.java
new file mode 100644 (file)
index 0000000..c4c48da
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.logic.keep;
+
+import org.junit.Test;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class KeepLocatorTest {
+
+    private KeepLocator locator;
+
+    @Test
+    public void md5sumIsExtracted() throws Exception {
+
+        // given
+        locator = new KeepLocator("7df44272090cee6c0732382bba415ee9+70");
+
+        // when
+        String actual = locator.getMd5sum();
+
+        // then
+        assertThat(actual).isEqualTo("7df44272090cee6c0732382bba415ee9");
+    }
+
+    @Test
+    public void locatorIsStrippedWithMd5sumAndSize() throws Exception {
+
+        // given
+        locator = new KeepLocator("7df44272090cee6c0732382bba415ee9+70");
+
+        // when
+        String actual = locator.stripped();
+
+        // then
+        assertThat(actual).isEqualTo("7df44272090cee6c0732382bba415ee9+70");
+    }
+
+
+    @Test
+    public void locatorToStringProperlyShowing() throws Exception {
+
+        // given
+        locator = new KeepLocator("7df44272090cee6c0732382bba415ee9+70+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f");
+
+        // when
+        String actual = locator.toString();
+
+        // then
+        assertThat(actual).isEqualTo("7df44272090cee6c0732382bba415ee9+70+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f");
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java
new file mode 100644 (file)
index 0000000..ac7dd02
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.test.utils;
+
+import org.arvados.client.config.FileConfigProvider;
+import okhttp3.mockwebserver.MockResponse;
+import okhttp3.mockwebserver.RecordedRequest;
+import org.apache.commons.io.FileUtils;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public final class ApiClientTestUtils {
+
+    static final String BASE_URL = "/arvados/v1/";
+
+    private ApiClientTestUtils() {}
+
+    public static MockResponse getResponse(String filename) throws IOException {
+        String filePath = String.format("src/test/resources/org/arvados/client/api/client/%s.json", filename);
+        File jsonFile = new File(filePath);
+        String json = FileUtils.readFileToString(jsonFile, Charset.defaultCharset());
+        return new MockResponse().setBody(json);
+    }
+
+    public static void assertAuthorizationHeader(RecordedRequest request) {
+        assertThat(request.getHeader("authorization")).isEqualTo("OAuth2 " + new FileConfigProvider().getApiToken());
+    }
+
+    public static void assertRequestPath(RecordedRequest request, String subPath) {
+        assertThat(request.getPath()).isEqualTo(BASE_URL + subPath);
+    }
+
+    public static void assertRequestMethod(RecordedRequest request, RequestMethod requestMethod) {
+        assertThat(request.getMethod()).isEqualTo(requestMethod.name());
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientIntegrationTest.java b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientIntegrationTest.java
new file mode 100644 (file)
index 0000000..59bd446
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.test.utils;
+
+import org.arvados.client.config.FileConfigProvider;
+import org.arvados.client.facade.ArvadosFacade;
+import org.junit.BeforeClass;
+
+import static org.junit.Assert.assertTrue;
+
+public class ArvadosClientIntegrationTest {
+
+    protected static final FileConfigProvider CONFIG = new FileConfigProvider("integration-tests-application.conf");
+    protected static final ArvadosFacade FACADE = new ArvadosFacade(CONFIG);
+    protected static final String PROJECT_UUID = CONFIG.getIntegrationTestProjectUuid();
+
+    @BeforeClass
+    public static void validateConfiguration(){
+        String msg = " info must be provided in configuration";
+        CONFIG.getConfig().entrySet()
+                .forEach(e -> assertTrue("Parameter " + e.getKey() + msg, !e.getValue().render().equals("\"\"")));
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientMockedWebServerTest.java b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientMockedWebServerTest.java
new file mode 100644 (file)
index 0000000..74324b6
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.test.utils;
+
+import okhttp3.mockwebserver.MockWebServer;
+import org.junit.After;
+import org.junit.Before;
+
+public class ArvadosClientMockedWebServerTest extends ArvadosClientUnitTest {
+    private static final int PORT = CONFIG.getApiPort();
+    protected MockWebServer server = new MockWebServer();
+
+    @Before
+    public void setUpServer() throws Exception {
+        server.start(PORT);
+    }
+    
+    @After
+    public void tearDownServer() throws Exception {
+        server.shutdown();
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientUnitTest.java b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientUnitTest.java
new file mode 100644 (file)
index 0000000..67566b6
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.test.utils;
+
+import org.arvados.client.config.FileConfigProvider;
+import org.junit.BeforeClass;
+
+import static org.junit.Assert.assertTrue;
+
+public class ArvadosClientUnitTest {
+
+    protected static final FileConfigProvider CONFIG = new FileConfigProvider("application.conf");
+
+    @BeforeClass
+    public static void validateConfiguration(){
+        String msg = " info must be provided in configuration";
+        CONFIG.getConfig().entrySet().forEach(e -> assertTrue("Parameter " + e.getKey() + msg, !e.getValue().render().equals("\"\"")));
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/FileTestUtils.java b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/FileTestUtils.java
new file mode 100644 (file)
index 0000000..2953450
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.test.utils;
+
+import org.apache.commons.io.FileUtils;
+import org.assertj.core.util.Lists;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.List;
+
+public class FileTestUtils {
+
+    public static final String FILE_SPLIT_TEST_DIR = "/tmp/file-split";
+    public static final String FILE_DOWNLOAD_TEST_DIR = "/tmp/arvados-downloaded";
+    public static final String TEST_FILE = FILE_SPLIT_TEST_DIR + "/test-file";
+    public static long ONE_FOURTH_GB = FileUtils.ONE_GB / 4;
+    public static long ONE_EIGTH_GB = FileUtils.ONE_GB / 8;
+    public static long HALF_GB = FileUtils.ONE_GB / 2;
+    public static int FILE_SPLIT_SIZE = 64;
+
+    public static void createDirectory(String path) throws Exception {
+        new File(path).mkdirs();
+    }
+
+    public static void cleanDirectory(String directory) throws Exception {
+        FileUtils.cleanDirectory(new File(directory));
+    }
+    
+    public static File generateFile(String path, long length) throws IOException {
+        RandomAccessFile testFile = new RandomAccessFile(path, "rwd");
+        testFile.setLength(length);
+        testFile.close();
+        return new File(path);
+    }
+    
+    public static List<File> generatePredefinedFiles() throws IOException {
+        return Lists.newArrayList(
+                generateFile(TEST_FILE + 1, FileUtils.ONE_KB),
+                generateFile(TEST_FILE + 2, FileUtils.ONE_KB * 20),
+                generateFile(TEST_FILE + " " + 3, FileUtils.ONE_MB)
+            );
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/test/utils/RequestMethod.java b/sdk/java-v2/src/test/java/org/arvados/client/test/utils/RequestMethod.java
new file mode 100644 (file)
index 0000000..53249c9
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.test.utils;
+
+public enum RequestMethod {
+    
+    GET, POST, PUT, DELETE
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/utils/FileMergeTest.java b/sdk/java-v2/src/test/java/org/arvados/client/utils/FileMergeTest.java
new file mode 100644 (file)
index 0000000..00ca0b2
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.utils;
+
+import org.arvados.client.test.utils.FileTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.List;
+
+import static org.arvados.client.test.utils.FileTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class FileMergeTest {
+
+    @Before
+    public void setUp() throws Exception {
+        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);
+    }
+
+    @Test
+    public void fileChunksAreMergedIntoOneFile() throws Exception {
+
+        // given
+        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB);
+
+        List<File> files = FileSplit.split(new File(TEST_FILE), new File(FILE_SPLIT_TEST_DIR), FILE_SPLIT_SIZE);
+        File targetFile = new File(TEST_FILE);
+
+        // when
+        FileMerge.merge(files, targetFile);
+
+        // then
+        assertThat(targetFile.length()).isEqualTo(FileTestUtils.ONE_EIGTH_GB);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);
+    }
+}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/utils/FileSplitTest.java b/sdk/java-v2/src/test/java/org/arvados/client/utils/FileSplitTest.java
new file mode 100644 (file)
index 0000000..4cc523c
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.utils;
+
+import org.arvados.client.test.utils.FileTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.List;
+
+import static org.arvados.client.test.utils.FileTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class FileSplitTest {
+
+    @Before
+    public void setUp() throws Exception {
+        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);
+    }
+
+    @Test
+    public void fileIsDividedIntoSmallerChunks() throws Exception {
+
+        // given
+        int expectedSize = 2;
+        int expectedFileSizeInBytes = 67108864;
+        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB);
+
+        // when
+        List<File> actual = FileSplit.split(new File(TEST_FILE), new File(FILE_SPLIT_TEST_DIR), FILE_SPLIT_SIZE);
+
+        // then
+        assertThat(actual).hasSize(expectedSize);
+        assertThat(actual).allMatch(a -> a.length() == expectedFileSizeInBytes);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);
+    }
+}
diff --git a/sdk/java-v2/src/test/resources/application.conf b/sdk/java-v2/src/test/resources/application.conf
new file mode 100644 (file)
index 0000000..f19f3dc
--- /dev/null
@@ -0,0 +1,10 @@
+# configuration for unit tests
+
+arvados {
+    api {
+        port = 9000
+        keepweb-port = 9000
+        token = 1m69yw9m2wanubzyfkb1e9icplqhtr2r969bu9rnzqbqhb7cnb
+        protocol = "http"
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/integration-tests-application.conf b/sdk/java-v2/src/test/resources/integration-tests-application.conf
new file mode 100644 (file)
index 0000000..2f934d4
--- /dev/null
@@ -0,0 +1,23 @@
+# Configuration for integration tests
+#
+# Remarks:
+# * For an example, see integration-tests-application.conf.example
+# * When filling in values, replace the empty quotation marks ("") on each line
+# * See the Arvados documentation for information on how to obtain a token:
+#   https://doc.arvados.org/user/reference/api-tokens.html
+#
+
+arvados {
+    api {
+        keepweb-host = ""
+        keepweb-port = 443
+        host = ""
+        port = 443
+        token = ""
+        protocol = https
+        host-insecure = false
+    }
+    integration-tests {
+        project-uuid = ""
+    }
+}
diff --git a/sdk/java-v2/src/test/resources/integration-tests-application.conf.example b/sdk/java-v2/src/test/resources/integration-tests-application.conf.example
new file mode 100644 (file)
index 0000000..e579918
--- /dev/null
@@ -0,0 +1,16 @@
+# example configuration for integration tests
+
+arvados {
+    api {
+        keepweb-host = collections.ardev.mycompany.com
+        keepweb-port = 443
+        host = api.ardev.mycompany.com
+        port = 443
+        token = mytoken
+        protocol = https
+        host-insecure = false
+    }
+    integration-tests {
+        project-uuid = ardev-j7d0g-aa123f81q6y7skk
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/sdk/java-v2/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker
new file mode 100644 (file)
index 0000000..ca6ee9c
--- /dev/null
@@ -0,0 +1 @@
+mock-maker-inline
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json
new file mode 100644 (file)
index 0000000..68dce30
--- /dev/null
@@ -0,0 +1,22 @@
+{
+    "href": "/collections/112ci-4zz18-12tncxzptzbec1p",
+    "kind": "arvados#collection",
+    "etag": "bqoujj7oybdx0jybwvtsebj7y",
+    "uuid": "112ci-4zz18-12tncxzptzbec1p",
+    "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+    "created_at": "2017-11-21T13:38:56.521853000Z",
+    "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+    "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+    "modified_at": "2017-11-21T13:38:56.521853000Z",
+    "name": "Super Collection",
+    "description": null,
+    "properties": {},
+    "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+    "manifest_text": ". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\n",
+    "replication_desired": null,
+    "replication_confirmed": null,
+    "replication_confirmed_at": null,
+    "delete_at": null,
+    "trash_at": null,
+    "is_trashed": false
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json
new file mode 100644 (file)
index 0000000..57a2ee5
--- /dev/null
@@ -0,0 +1,22 @@
+{
+    "href": "/collections/112ci-4zz18-12tncxzptzbec1p",
+    "kind": "arvados#collection",
+    "etag": "bqoujj7oybdx0jybwvtsebj7y",
+    "uuid": "112ci-4zz18-12tncxzptzbec1p",
+    "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+    "created_at": "2017-11-21T13:38:56.521853000Z",
+    "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+    "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+    "modified_at": "2017-11-21T13:38:56.521853000Z",
+    "name": "Super Collection",
+    "description": null,
+    "properties": {},
+    "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+    "manifest_text": "",
+    "replication_desired": null,
+    "replication_confirmed": null,
+    "replication_confirmed_at": null,
+    "delete_at": null,
+    "trash_at": null,
+    "is_trashed": false
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json
new file mode 100644 (file)
index 0000000..1fed383
--- /dev/null
@@ -0,0 +1,22 @@
+{
+  "href": "/collections/ardev-4zz18-jk5vo4uo9u5vj52",
+  "kind": "arvados#collection",
+  "etag": "2vm76dxmzr23u9774iguuxsrg",
+  "uuid": "ardev-4zz18-jk5vo4uo9u5vj52",
+  "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "created_at": "2018-02-19T11:00:00.852389000Z",
+  "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+  "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "modified_at": "2018-02-19T11:00:00.852389000Z",
+  "name": "New Collection (2018-02-19 12:00:00.273)",
+  "description": null,
+  "properties": {},
+  "portable_data_hash": "49581091dfad651945c12b08d4735d88+112",
+  "manifest_text": ". 163679d58edaadc28db769011728a72c+1070080+A3acf8c1fe582c265d2077702e4a7d74fcc03aba8@5aa4fdeb 0:1024:test-file1 1024:20480:test-file2 21504:1048576:test-file\\0403\n",
+  "replication_desired": null,
+  "replication_confirmed": null,
+  "replication_confirmed_at": null,
+  "delete_at": null,
+  "trash_at": null,
+  "is_trashed": false
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-get.json
new file mode 100644 (file)
index 0000000..e8fdd83
--- /dev/null
@@ -0,0 +1,22 @@
+{
+    "href": "/collections/112ci-4zz18-p51w7z3fpopo6sm",
+    "kind": "arvados#collection",
+    "etag": "52tk5yg024cwhkkcidu3zcmj2",
+    "uuid": "112ci-4zz18-p51w7z3fpopo6sm",
+    "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+    "created_at": "2017-11-15T10:36:03.554356000Z",
+    "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+    "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+    "modified_at": "2017-11-15T10:36:03.554356000Z",
+    "name": "Collection With Manifest #2",
+    "description": null,
+    "properties": {},
+    "portable_data_hash": "6c4106229b08fe25f48b3a7a8289dd46+143",
+    "manifest_text": ". 66c9daa69630e092e9ce554b7aae8a20+524288+A4a15ffea58f259e09f68d3f7eea29942750a79d0@5a269ff6 435f38dd384b06c248feabee0cabca52+524288+A8a99e8148bd368c49901526098901bb7d7890c3b@5a269ff6 dc5b6c104aab35fff6d70a4dadc28d37+391727+Ab0662d549c422c983fccaad02b4ade7b48a8255b@5a269ff6 0:1440303:lombok.jar\n",
+    "replication_desired": null,
+    "replication_confirmed": null,
+    "replication_confirmed_at": null,
+    "delete_at": null,
+    "trash_at": null,
+    "is_trashed": false
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/collections-list.json
new file mode 100644 (file)
index 0000000..86a3bda
--- /dev/null
@@ -0,0 +1,871 @@
+{
+    "kind": "arvados#collectionList",
+    "etag": "",
+    "self_link": "",
+    "offset": 0,
+    "limit": 100,
+    "items": [
+        {
+            "href": "/collections/112ci-4zz18-x6xfmvz0chnkzgv",
+            "kind": "arvados#collection",
+            "etag": "8xyiwnih5b5vzmj5sa33348a7",
+            "uuid": "112ci-4zz18-x6xfmvz0chnkzgv",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-15T13:06:36.934337000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-15T13:06:36.934337000Z",
+            "name": "Collection With Manifest #3",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "6c4106229b08fe25f48b3a7a8289dd46+143",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-p51w7z3fpopo6sm",
+            "kind": "arvados#collection",
+            "etag": "8cmhep8aixe4p42pxjoct5502",
+            "uuid": "112ci-4zz18-p51w7z3fpopo6sm",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-15T10:36:03.554356000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-15T10:36:03.554356000Z",
+            "name": "Collection With Manifest #2",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "6c4106229b08fe25f48b3a7a8289dd46+143",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-xb6gf2yraln7cwa",
+            "kind": "arvados#collection",
+            "etag": "de2ol2dyvsba3mn46al760cyg",
+            "uuid": "112ci-4zz18-xb6gf2yraln7cwa",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-15T09:32:44.146172000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-15T09:32:44.146172000Z",
+            "name": "New collection",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-r5jfktpn3a9o0ap",
+            "kind": "arvados#collection",
+            "etag": "dby68gd0vatvi090cu0axvtq3",
+            "uuid": "112ci-4zz18-r5jfktpn3a9o0ap",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-14T13:00:35.431046000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-14T13:00:35.431046000Z",
+            "name": "Collection With Manifest #1",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "3c59518bf8e1100d420488d822682b4a+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-nqxk8xjn6mtskzt",
+            "kind": "arvados#collection",
+            "etag": "2b34uzau862w862a2rv36agv6",
+            "uuid": "112ci-4zz18-nqxk8xjn6mtskzt",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-14T12:59:34.767068000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-14T12:59:34.767068000Z",
+            "name": "Empty Collection #2",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-rs9bcf5qnyfjrkm",
+            "kind": "arvados#collection",
+            "etag": "60aywazztwfspnasltufcjxpa",
+            "uuid": "112ci-4zz18-rs9bcf5qnyfjrkm",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-14T12:52:33.124452000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-14T12:52:33.124452000Z",
+            "name": "Empty Collection #1",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-af656lee4kv7q2m",
+            "kind": "arvados#collection",
+            "etag": "1jward6snif3tsjzftxh8hvwh",
+            "uuid": "112ci-4zz18-af656lee4kv7q2m",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-14T12:09:05.319319000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-14T12:09:05.319319000Z",
+            "name": "create example",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-y2zqix7k9an7nro",
+            "kind": "arvados#collection",
+            "etag": "zs2n4zliu6nb5yk3rw6h5ugw",
+            "uuid": "112ci-4zz18-y2zqix7k9an7nro",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-13T16:59:02.299257000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-13T16:59:02.299257000Z",
+            "name": "Saved at 2017-11-13 16:59:01 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-wq77jfi62u5i4rv",
+            "kind": "arvados#collection",
+            "etag": "eijhemzgy44ofmu0dtrowl604",
+            "uuid": "112ci-4zz18-wq77jfi62u5i4rv",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-13T16:58:10.637548000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-13T16:58:10.637548000Z",
+            "name": "Saved at 2017-11-13 16:58:07 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-unaeckkjgeg7ui0",
+            "kind": "arvados#collection",
+            "etag": "1oq7ye0gfbf3ih6y864w3n683",
+            "uuid": "112ci-4zz18-unaeckkjgeg7ui0",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-10T09:43:07.583862000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-10T09:43:07.583862000Z",
+            "name": "Saved at 2017-11-10 09:43:03 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-5y6atonkxq55lms",
+            "kind": "arvados#collection",
+            "etag": "4qmqlro878yx8q7ikhilo8qwn",
+            "uuid": "112ci-4zz18-5y6atonkxq55lms",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T12:46:15.245770000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T12:46:15.245770000Z",
+            "name": "Saved at 2017-11-09 12:46:13 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-b3fjqd01pxjvseo",
+            "kind": "arvados#collection",
+            "etag": "91v698hngoz241c38bbmh0ogc",
+            "uuid": "112ci-4zz18-b3fjqd01pxjvseo",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:54:07.259998000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:54:07.259998000Z",
+            "name": "Saved at 2017-11-09 11:54:04 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-cwfxl8h41q18n65",
+            "kind": "arvados#collection",
+            "etag": "215t842ckrrgjpxrxr4j0gsui",
+            "uuid": "112ci-4zz18-cwfxl8h41q18n65",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:49:38.276888000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:49:38.276888000Z",
+            "name": "Saved at 2017-11-09 11:49:35 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-uv4xu08739tn1vy",
+            "kind": "arvados#collection",
+            "etag": "90z6i3oqv197osng3wvjjir3t",
+            "uuid": "112ci-4zz18-uv4xu08739tn1vy",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:43:05.917513000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:43:05.917513000Z",
+            "name": "Saved at 2017-11-09 11:43:05 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-pzisn8c5mefzczv",
+            "kind": "arvados#collection",
+            "etag": "5lcf6wvc3wypwobswdz22wen",
+            "uuid": "112ci-4zz18-pzisn8c5mefzczv",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:40:38.804718000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:40:38.804718000Z",
+            "name": "Saved at 2017-11-09 11:40:36 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-mj24uwtnqqrno27",
+            "kind": "arvados#collection",
+            "etag": "98s08xew49avui1gy3mzit8je",
+            "uuid": "112ci-4zz18-mj24uwtnqqrno27",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:40:25.189869000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:40:25.189869000Z",
+            "name": "Saved at 2017-11-09 11:40:24 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-oco162516upgqng",
+            "kind": "arvados#collection",
+            "etag": "a09wnvl4i51xqx7u9yf4qbi94",
+            "uuid": "112ci-4zz18-oco162516upgqng",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:39:04.148785000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:39:04.148785000Z",
+            "name": "Saved at 2017-11-09 11:39:03 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-tlze7dgczsdwkep",
+            "kind": "arvados#collection",
+            "etag": "4ee2xudbc5rkr597drgu9tg10",
+            "uuid": "112ci-4zz18-tlze7dgczsdwkep",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:37:59.478975000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:37:59.478975000Z",
+            "name": "Saved at 2017-11-09 11:37:58 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-nq0kxi9d7w64la1",
+            "kind": "arvados#collection",
+            "etag": "5aa3evnbceo3brnps2e1sq8ts",
+            "uuid": "112ci-4zz18-nq0kxi9d7w64la1",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:32:23.329259000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:32:23.329259000Z",
+            "name": "Saved at 2017-11-09 11:32:22 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-fks9mewtw155pvx",
+            "kind": "arvados#collection",
+            "etag": "97vicgogv8bovmk4s2jymsdq",
+            "uuid": "112ci-4zz18-fks9mewtw155pvx",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:30:17.589462000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:30:17.589462000Z",
+            "name": "Saved at 2017-11-09 11:30:17 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-kp356e0q2wdl2df",
+            "kind": "arvados#collection",
+            "etag": "btktwjclv063s1rd6duvk51v3",
+            "uuid": "112ci-4zz18-kp356e0q2wdl2df",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:29:26.820481000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:29:26.820481000Z",
+            "name": "Saved at 2017-11-09 11:29:25 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-0ey8ob38xf7surq",
+            "kind": "arvados#collection",
+            "etag": "bob83na42pufqli1a5buxryvm",
+            "uuid": "112ci-4zz18-0ey8ob38xf7surq",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:08:53.781498000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:08:53.781498000Z",
+            "name": "Saved at 2017-11-09 11:08:52 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-wu2n0fv3cewna1n",
+            "kind": "arvados#collection",
+            "etag": "7pl1x327eeutqtsjppdj284g8",
+            "uuid": "112ci-4zz18-wu2n0fv3cewna1n",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T11:08:33.423284000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T11:08:33.423284000Z",
+            "name": "Saved at 2017-11-09 11:08:33 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-hyybo6yuvkx4hrm",
+            "kind": "arvados#collection",
+            "etag": "2wg1wn2o18ubrgbhbqwwsslhf",
+            "uuid": "112ci-4zz18-hyybo6yuvkx4hrm",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T10:44:53.096798000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T10:44:53.096798000Z",
+            "name": "Saved at 2017-11-09 10:44:51 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-h3gjq7gzd4syanw",
+            "kind": "arvados#collection",
+            "etag": "8jk0at4e69cwjyjamvm4wz2oj",
+            "uuid": "112ci-4zz18-h3gjq7gzd4syanw",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T10:41:31.278281000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T10:41:31.278281000Z",
+            "name": "Saved at 2017-11-09 10:41:30 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-jinwyyaeigjs1yg",
+            "kind": "arvados#collection",
+            "etag": "be57zhzufz2hp1tbdwidoro5j",
+            "uuid": "112ci-4zz18-jinwyyaeigjs1yg",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T10:41:07.083017000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T10:41:07.083017000Z",
+            "name": "Saved at 2017-11-09 10:41:06 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-etf8aghyxlfxvo1",
+            "kind": "arvados#collection",
+            "etag": "29lj2roie4cygo5ffgrduflly",
+            "uuid": "112ci-4zz18-etf8aghyxlfxvo1",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T10:40:31.710865000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T10:40:31.710865000Z",
+            "name": "Saved at 2017-11-09 10:40:31 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-jtbn4edpkkhbm9b",
+            "kind": "arvados#collection",
+            "etag": "6div78e1nhusii4x1xkp3rg2v",
+            "uuid": "112ci-4zz18-jtbn4edpkkhbm9b",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T10:39:36.999602000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T10:39:36.999602000Z",
+            "name": "Saved at 2017-11-09 10:39:36 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-whdleimp34hiqp6",
+            "kind": "arvados#collection",
+            "etag": "12wlbsxlmy3sze4v2m0ua7ake",
+            "uuid": "112ci-4zz18-whdleimp34hiqp6",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T10:19:52.879907000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T10:19:52.879907000Z",
+            "name": "Saved at 2017-11-09 10:19:52 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-kj8dz72zpo5kbtm",
+            "kind": "arvados#collection",
+            "etag": "9bv1bw9afb3w84gu55uzcgd6h",
+            "uuid": "112ci-4zz18-kj8dz72zpo5kbtm",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T10:16:31.558621000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T10:16:31.558621000Z",
+            "name": "Saved at 2017-11-09 10:16:30 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "5ba3fc508718fabfa20d24390fe31856+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-tr306nau9hrr437",
+            "kind": "arvados#collection",
+            "etag": "683d77tvlhe97etk9bk2bx8ds",
+            "uuid": "112ci-4zz18-tr306nau9hrr437",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:59:44.978811000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:59:44.978811000Z",
+            "name": "Saved at 2017-11-09 09:59:44 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-oxuk69569mxztp0",
+            "kind": "arvados#collection",
+            "etag": "1m34v9jbna2v7gv7auio54i8w",
+            "uuid": "112ci-4zz18-oxuk69569mxztp0",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:59:30.774888000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:59:30.774888000Z",
+            "name": "Saved at 2017-11-09 09:59:30 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-wf8sl6xbyfwjyer",
+            "kind": "arvados#collection",
+            "etag": "7l2a9fhqmxg7ghn7osx0s19v4",
+            "uuid": "112ci-4zz18-wf8sl6xbyfwjyer",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:58:21.496088000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:58:21.496088000Z",
+            "name": "Saved at 2017-11-09 09:58:20 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-drpia2es1hp9ydi",
+            "kind": "arvados#collection",
+            "etag": "33dw426fhs2vlb50b6301ukn0",
+            "uuid": "112ci-4zz18-drpia2es1hp9ydi",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:56:08.506505000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:56:08.506505000Z",
+            "name": "Saved at 2017-11-09 09:56:08 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-5b4px2i2dwyidfi",
+            "kind": "arvados#collection",
+            "etag": "2437tnhn2gmti52lpm8nfq9ct",
+            "uuid": "112ci-4zz18-5b4px2i2dwyidfi",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:54:06.651026000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:54:06.651026000Z",
+            "name": "Saved at 2017-11-09 09:54:06 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-94oslnwnxe1f9wp",
+            "kind": "arvados#collection",
+            "etag": "7e0k48zu93o57zudxjp1yrgjq",
+            "uuid": "112ci-4zz18-94oslnwnxe1f9wp",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:40:04.240297000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:40:04.240297000Z",
+            "name": "Saved at 2017-11-09 09:39:58 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-2fk0d5d4jjc1fmq",
+            "kind": "arvados#collection",
+            "etag": "cuirr803f54e89reakuq50oaq",
+            "uuid": "112ci-4zz18-2fk0d5d4jjc1fmq",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:36:14.952671000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:36:14.952671000Z",
+            "name": "Saved at 2017-11-09 09:36:08 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-xp9pu81xyc5h422",
+            "kind": "arvados#collection",
+            "etag": "3bi5xd8ezxrazk5266cwzn4s4",
+            "uuid": "112ci-4zz18-xp9pu81xyc5h422",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:35:29.552746000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:35:29.552746000Z",
+            "name": "Saved at 2017-11-09 09:35:29 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-znb4lo0if2as58c",
+            "kind": "arvados#collection",
+            "etag": "59uaoxy6uh82i6lrvr3ht8gz1",
+            "uuid": "112ci-4zz18-znb4lo0if2as58c",
+            "owner_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "created_at": "2017-11-09T09:31:08.109971000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-09T09:31:08.109971000Z",
+            "name": "Saved at 2017-11-09 09:31:06 UTC by VirtualBox",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "67cbebb9f739b6b06ca056d21115cf43+53",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-6pvl5ea5u932qzi",
+            "kind": "arvados#collection",
+            "etag": "dksrh8jznxoaidl29i1vv5904",
+            "uuid": "112ci-4zz18-6pvl5ea5u932qzi",
+            "owner_uuid": "112ci-j7d0g-tw71k7mxii6fqgx",
+            "created_at": "2017-11-08T12:48:32.238698000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-f4633qdjs6w8zcy",
+            "modified_by_user_uuid": "112ci-tpzed-nd84czdo4iea1mz",
+            "modified_at": "2017-11-08T12:50:23.946608000Z",
+            "name": "New collection",
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "18c037c51c3f74be53ea2b115afd0c5f+69",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        },
+        {
+            "href": "/collections/112ci-4zz18-wq5pyrxfv1t9isu",
+            "kind": "arvados#collection",
+            "etag": "1w1rhhd6oql4ceb7h9t16sf0q",
+            "uuid": "112ci-4zz18-wq5pyrxfv1t9isu",
+            "owner_uuid": "112ci-j7d0g-anonymouspublic",
+            "created_at": "2017-11-03T10:03:20.364737000Z",
+            "modified_by_client_uuid": null,
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:03:20.364737000Z",
+            "name": null,
+            "description": null,
+            "properties": {},
+            "portable_data_hash": "d41d8cd98f00b204e9800998ecf8427e+0",
+            "replication_desired": null,
+            "replication_confirmed": null,
+            "replication_confirmed_at": null,
+            "delete_at": null,
+            "trash_at": null,
+            "is_trashed": false
+        }
+    ],
+    "items_available": 41
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-get.json
new file mode 100644 (file)
index 0000000..f1834e7
--- /dev/null
@@ -0,0 +1,21 @@
+{
+  "href": "/groups/ardev-j7d0g-bmg3pfqtx3ivczp",
+  "kind": "arvados#group",
+  "etag": "3hw0vk4mbl0ofvia5k6x4dwrx",
+  "uuid": "ardev-j7d0g-bmg3pfqtx3ivczp",
+  "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "created_at": "2018-03-29T11:09:05.984597000Z",
+  "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+  "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "modified_at": "2018-03-29T11:09:05.984597000Z",
+  "name": "TestGroup1",
+  "group_class": "project",
+  "description": null,
+  "writable_by": [
+    "ardev-tpzed-n3kzq4fvoks3uw4",
+    "ardev-tpzed-n3kzq4fvoks3uw4"
+  ],
+  "delete_at": null,
+  "trash_at": null,
+  "is_trashed": false
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/groups-list.json
new file mode 100644 (file)
index 0000000..fa74e1c
--- /dev/null
@@ -0,0 +1,430 @@
+{
+  "kind": "arvados#groupList",
+  "etag": "",
+  "self_link": "",
+  "offset": 0,
+  "limit": 100,
+  "items": [
+    {
+      "href": "/groups/ardev-j7d0g-ylx7wnu1moge2di",
+      "kind": "arvados#group",
+      "etag": "68vubv3iw7663763bozxebmyf",
+      "uuid": "ardev-j7d0g-ylx7wnu1moge2di",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-18T09:09:21.126649000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-18T09:09:21.126649000Z",
+      "name": "TestProject1",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-mnzhga726itrbrq",
+      "kind": "arvados#group",
+      "etag": "68q7r8r37u9hckr2zsynvton3",
+      "uuid": "ardev-j7d0g-mnzhga726itrbrq",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T12:11:24.389594000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T12:11:24.389594000Z",
+      "name": "TestProject2",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-0w9m1sz46ljtdnm",
+      "kind": "arvados#group",
+      "etag": "ef4vzx5gyudkrg9zml0zdv6qu",
+      "uuid": "ardev-j7d0g-0w9m1sz46ljtdnm",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T12:08:39.066802000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T12:08:39.066802000Z",
+      "name": "TestProject3",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-r20iem5ou6h5wao",
+      "kind": "arvados#group",
+      "etag": "6h6h4ta6yyf9058delxk8fnqs",
+      "uuid": "ardev-j7d0g-r20iem5ou6h5wao",
+      "owner_uuid": "ardev-j7d0g-j7drd8yikkp6evd",
+      "created_at": "2018-04-17T12:03:39.647244000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T12:03:39.647244000Z",
+      "name": "TestProject4",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-j7d0g-j7drd8yikkp6evd",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-j7drd8yikkp6evd",
+      "kind": "arvados#group",
+      "etag": "6se2y8f9o7uu06pbopgq56xds",
+      "uuid": "ardev-j7d0g-j7drd8yikkp6evd",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T11:58:31.339515000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T11:58:31.339515000Z",
+      "name": "TestProject5",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-kh1g7i5va870xt0",
+      "kind": "arvados#group",
+      "etag": "2si26vaig3vig9266pqkqh2gy",
+      "uuid": "ardev-j7d0g-kh1g7i5va870xt0",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T10:56:54.391676000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T10:56:54.391676000Z",
+      "name": "TestProject6",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-sclkdyuwm4h2m78",
+      "kind": "arvados#group",
+      "etag": "edgnz6q0vt2u3o13ujtfohb75",
+      "uuid": "ardev-j7d0g-sclkdyuwm4h2m78",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T10:27:15.914517000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T10:27:15.914517000Z",
+      "name": "TestProject7",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-593khc577zuyyhe",
+      "kind": "arvados#group",
+      "etag": "39ig9ttgec6lbe096uetn2cb9",
+      "uuid": "ardev-j7d0g-593khc577zuyyhe",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T10:27:03.858203000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T10:27:03.858203000Z",
+      "name": "TestProject8",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-iotds0tm559dbz7",
+      "kind": "arvados#group",
+      "etag": "1dpr8v6tx6pta0fozq93eyeou",
+      "uuid": "ardev-j7d0g-iotds0tm559dbz7",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T10:26:25.180623000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T10:26:25.180623000Z",
+      "name": "TestProject9",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-gbqay74778tonb8",
+      "kind": "arvados#group",
+      "etag": "dizbavs2opfe1wpx6thocfki0",
+      "uuid": "ardev-j7d0g-gbqay74778tonb8",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T10:26:06.435961000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T10:26:06.435961000Z",
+      "name": "TestProject10",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-fmq1t0jlznehbdm",
+      "kind": "arvados#group",
+      "etag": "6xue8m3lx9qpptfvdf13val5t",
+      "uuid": "ardev-j7d0g-fmq1t0jlznehbdm",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-17T10:25:55.546399000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-17T10:25:55.546399000Z",
+      "name": "TestProject11",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-vxju56ch64u51gq",
+      "kind": "arvados#group",
+      "etag": "2gqix9e4m023usi9exhrsjx6z",
+      "uuid": "ardev-j7d0g-vxju56ch64u51gq",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-16T14:09:49.700566000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-16T14:09:49.700566000Z",
+      "name": "TestProject12",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-g8m4w0d22gv6fbj",
+      "kind": "arvados#group",
+      "etag": "73n8x82814o6ihld0kltf468d",
+      "uuid": "ardev-j7d0g-g8m4w0d22gv6fbj",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-11T15:02:35.016850000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-11T15:02:35.016850000Z",
+      "name": "TestProject13",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-lstqed4y78khaqm",
+      "kind": "arvados#group",
+      "etag": "91f7uwq7pj3d3ez1u4smjg3ch",
+      "uuid": "ardev-j7d0g-lstqed4y78khaqm",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-06T15:29:27.754408000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-06T15:29:27.754408000Z",
+      "name": "TestProject14",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-0jbezvnq8i07l7p",
+      "kind": "arvados#group",
+      "etag": "7dbxhvbcfaogwnvo8k4mtqthk",
+      "uuid": "ardev-j7d0g-0jbezvnq8i07l7p",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-04-05T09:32:46.946417000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-04-05T09:32:46.946417000Z",
+      "name": "TestProject15",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-72dxer22g6iltqz",
+      "kind": "arvados#group",
+      "etag": "dhfu203rckzdzvx832wm7jv59",
+      "uuid": "ardev-j7d0g-72dxer22g6iltqz",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-03-29T11:27:02.482218000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-03-29T13:17:00.045606000Z",
+      "name": "TestProject16",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-nebzwquxtq1v3o5",
+      "kind": "arvados#group",
+      "etag": "7l9oxbdf4e1m9ddnujokf7czz",
+      "uuid": "ardev-j7d0g-nebzwquxtq1v3o5",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-03-29T11:11:26.235411000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-03-29T11:11:26.235411000Z",
+      "name": "TestProject17",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-5589c8dmxevecqh",
+      "kind": "arvados#group",
+      "etag": "83862x2o4453mja2rvypjl5gv",
+      "uuid": "ardev-j7d0g-5589c8dmxevecqh",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-03-29T11:10:58.496482000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-03-29T11:10:58.496482000Z",
+      "name": "TestProject18",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-bmg3pfqtx3ivczp",
+      "kind": "arvados#group",
+      "etag": "3hw0vk4mbl0ofvia5k6x4dwrx",
+      "uuid": "ardev-j7d0g-bmg3pfqtx3ivczp",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-03-29T11:09:05.984597000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-03-29T11:09:05.984597000Z",
+      "name": "TestProject19",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    },
+    {
+      "href": "/groups/ardev-j7d0g-mfitz2oa4rpycou",
+      "kind": "arvados#group",
+      "etag": "6p9xbxpttj782mpqs537gfvc6",
+      "uuid": "ardev-j7d0g-mfitz2oa4rpycou",
+      "owner_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "created_at": "2018-03-29T11:00:19.809612000Z",
+      "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+      "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+      "modified_at": "2018-03-29T11:00:19.809612000Z",
+      "name": "TestProject20",
+      "group_class": "project",
+      "description": null,
+      "writable_by": [
+        "ardev-tpzed-n3kzq4fvoks3uw4",
+        "ardev-tpzed-n3kzq4fvoks3uw4"
+      ],
+      "delete_at": null,
+      "trash_at": null,
+      "is_trashed": false
+    }
+  ],
+  "items_available": 20
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt
new file mode 100644 (file)
index 0000000..5cbed85
--- /dev/null
@@ -0,0 +1 @@
+Sample text file to test keep client.
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json
new file mode 100644 (file)
index 0000000..d5bd0d8
--- /dev/null
@@ -0,0 +1,42 @@
+{
+    "kind": "arvados#keepServiceList",
+    "etag": "",
+    "self_link": "",
+    "offset": null,
+    "limit": null,
+    "items": [
+        {
+            "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
+            "kind": "arvados#keepService",
+            "etag": "bjzh7og2d9z949lbd38vnnslt",
+            "uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
+            "owner_uuid": "112ci-tpzed-000000000000000",
+            "created_at": "2017-11-03T10:04:48.314229000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:04:48.314229000Z",
+            "service_host": "localhost",
+            "service_port": 9000,
+            "service_ssl_flag": false,
+            "service_type": "disk",
+            "read_only": false
+        },
+        {
+            "href": "/keep_services/112ci-bi6l4-f0r03wrqymotwql",
+            "kind": "arvados#keepService",
+            "etag": "7m64l69kko4bytpsykf8cay7t",
+            "uuid": "112ci-bi6l4-f0r03wrqymotwql",
+            "owner_uuid": "112ci-tpzed-000000000000000",
+            "created_at": "2017-11-03T10:04:48.351577000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:04:48.351577000Z",
+            "service_host": "localhost",
+            "service_port": 9001,
+            "service_ssl_flag": false,
+            "service_type": "disk",
+            "read_only": false
+        }
+    ],
+    "items_available": 2
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json
new file mode 100644 (file)
index 0000000..3d95cf9
--- /dev/null
@@ -0,0 +1,42 @@
+{
+    "kind": "arvados#keepServiceList",
+    "etag": "",
+    "self_link": "",
+    "offset": null,
+    "limit": null,
+    "items": [
+        {
+            "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
+            "kind": "arvados#keepService",
+            "etag": "bjzh7og2d9z949lbd38vnnslt",
+            "uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
+            "owner_uuid": "112ci-tpzed-000000000000000",
+            "created_at": "2017-11-03T10:04:48.314229000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:04:48.314229000Z",
+            "service_host": "localhost",
+            "service_port": 9000,
+            "service_ssl_flag": false,
+            "service_type": "disk",
+            "read_only": false
+        },
+        {
+            "href": "/keep_services/112ci-bi6l4-f0r03wrqymotwql",
+            "kind": "arvados#keepService",
+            "etag": "7m64l69kko4bytpsykf8cay7t",
+            "uuid": "112ci-bi6l4-f0r03wrqymotwql",
+            "owner_uuid": "112ci-tpzed-000000000000000",
+            "created_at": "2017-11-03T10:04:48.351577000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:04:48.351577000Z",
+            "service_host": "localhost",
+            "service_port": 9000,
+            "service_ssl_flag": false,
+            "service_type": "gpfs",
+            "read_only": false
+        }
+    ],
+    "items_available": 2
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json
new file mode 100644 (file)
index 0000000..f3c2894
--- /dev/null
@@ -0,0 +1,16 @@
+{
+    "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
+    "kind": "arvados#keepService",
+    "etag": "bjzh7og2d9z949lbd38vnnslt",
+    "uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
+    "owner_uuid": "112ci-tpzed-000000000000000",
+    "created_at": "2017-11-03T10:04:48.314229000Z",
+    "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+    "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+    "modified_at": "2017-11-03T10:04:48.314229000Z",
+    "service_host": "10.0.2.15",
+    "service_port": 9000,
+    "service_ssl_flag": false,
+    "service_type": "disk",
+    "read_only": false
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json
new file mode 100644 (file)
index 0000000..90ba916
--- /dev/null
@@ -0,0 +1,58 @@
+{
+    "kind": "arvados#keepServiceList",
+    "etag": "",
+    "self_link": "",
+    "offset": 0,
+    "limit": 100,
+    "items": [
+        {
+            "href": "/keep_services/112ci-bi6l4-f0r03wrqymotwql",
+            "kind": "arvados#keepService",
+            "etag": "7m64l69kko4bytpsykf8cay7t",
+            "uuid": "112ci-bi6l4-f0r03wrqymotwql",
+            "owner_uuid": "112ci-tpzed-000000000000000",
+            "created_at": "2017-11-03T10:04:48.351577000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:04:48.351577000Z",
+            "service_host": "10.0.2.15",
+            "service_port": 9000,
+            "service_ssl_flag": false,
+            "service_type": "disk",
+            "read_only": false
+        },
+        {
+            "href": "/keep_services/112ci-bi6l4-hv02fg8sbti8ykk",
+            "kind": "arvados#keepService",
+            "etag": "bjzh7og2d9z949lbd38vnnslt",
+            "uuid": "112ci-bi6l4-hv02fg8sbti8ykk",
+            "owner_uuid": "112ci-tpzed-000000000000000",
+            "created_at": "2017-11-03T10:04:48.314229000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:04:48.314229000Z",
+            "service_host": "10.0.2.15",
+            "service_port": 9001,
+            "service_ssl_flag": false,
+            "service_type": "disk",
+            "read_only": false
+        },
+        {
+            "href": "/keep_services/112ci-bi6l4-ko27cfbsf2ssx2m",
+            "kind": "arvados#keepService",
+            "etag": "4be61qkpt6nzdfff4vj9nkpmj",
+            "uuid": "112ci-bi6l4-ko27cfbsf2ssx2m",
+            "owner_uuid": "112ci-tpzed-000000000000000",
+            "created_at": "2017-11-03T10:04:36.355045000Z",
+            "modified_by_client_uuid": "112ci-ozdt8-xxy0ipzwti8gnmt",
+            "modified_by_user_uuid": "112ci-tpzed-000000000000000",
+            "modified_at": "2017-11-03T10:04:36.355045000Z",
+            "service_host": "10.0.2.15",
+            "service_port": 9002,
+            "service_ssl_flag": false,
+            "service_type": "proxy",
+            "read_only": false
+        }
+    ],
+    "items_available": 3
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json
new file mode 100644 (file)
index 0000000..c930ee2
--- /dev/null
@@ -0,0 +1,9 @@
+{
+    "kind": "arvados#keepServiceList",
+    "etag": "",
+    "self_link": "",
+    "offset": null,
+    "limit": null,
+    "items": [],
+    "items_available": 0
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-create.json
new file mode 100644 (file)
index 0000000..87d09ab
--- /dev/null
@@ -0,0 +1,26 @@
+{
+    "href": "/users/ardev-tpzed-q6dvn7sby55up1b",
+    "kind": "arvados#user",
+    "etag": "b21emst9eu9u1wdpqcz6la583",
+    "uuid": "ardev-tpzed-q6dvn7sby55up1b",
+    "owner_uuid": "ardev-tpzed-000000000000000",
+    "created_at": "2017-10-30T19:42:43.324740000Z",
+    "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+    "modified_by_user_uuid": "ardev-tpzed-o3km4ug9jhs189j",
+    "modified_at": "2017-10-31T09:01:03.985749000Z",
+    "email": "example@email.com",
+    "username": "johnwayne",
+    "full_name": "John Wayne",
+    "first_name": "John",
+    "last_name": "Wayne",
+    "identity_url": "ardev-tpzed-r09t5ztf5qd3rlj",
+    "is_active": true,
+    "is_admin": null,
+    "is_invited": true,
+    "prefs": {},
+    "writable_by": [
+        "ardev-tpzed-000000000000000",
+        "ardev-tpzed-q6dvn7sby55up1b",
+        "ardev-j7d0g-000000000000000"
+    ]
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-get.json
new file mode 100644 (file)
index 0000000..87d09ab
--- /dev/null
@@ -0,0 +1,26 @@
+{
+    "href": "/users/ardev-tpzed-q6dvn7sby55up1b",
+    "kind": "arvados#user",
+    "etag": "b21emst9eu9u1wdpqcz6la583",
+    "uuid": "ardev-tpzed-q6dvn7sby55up1b",
+    "owner_uuid": "ardev-tpzed-000000000000000",
+    "created_at": "2017-10-30T19:42:43.324740000Z",
+    "modified_by_client_uuid": "ardev-ozdt8-97tzh5x96spqkay",
+    "modified_by_user_uuid": "ardev-tpzed-o3km4ug9jhs189j",
+    "modified_at": "2017-10-31T09:01:03.985749000Z",
+    "email": "example@email.com",
+    "username": "johnwayne",
+    "full_name": "John Wayne",
+    "first_name": "John",
+    "last_name": "Wayne",
+    "identity_url": "ardev-tpzed-r09t5ztf5qd3rlj",
+    "is_active": true,
+    "is_admin": null,
+    "is_invited": true,
+    "prefs": {},
+    "writable_by": [
+        "ardev-tpzed-000000000000000",
+        "ardev-tpzed-q6dvn7sby55up1b",
+        "ardev-j7d0g-000000000000000"
+    ]
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-list.json
new file mode 100644 (file)
index 0000000..2ff1ded
--- /dev/null
@@ -0,0 +1,115 @@
+{
+    "kind": "arvados#userList",
+    "etag": "",
+    "self_link": "",
+    "offset": 0,
+    "limit": 100,
+    "items": [
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-12389ux30402est",
+            "email": "test.user@email.com",
+            "first_name": "Test",
+            "last_name": "User",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123vn7sby55up1b",
+            "email": "test.user1@email.com",
+            "first_name": "Test1",
+            "last_name": "User1",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123g70lq1m3c6fz",
+            "email": "test.user2@email.com",
+            "first_name": "Test2",
+            "last_name": "User2",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-1233zsoudkgq92e",
+            "email": "test.user3@email.com",
+            "first_name": "Test3",
+            "last_name": "User3",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-1234xjvs0clppd3",
+            "email": "test.user4@email.com",
+            "first_name": "Test4",
+            "last_name": "User4",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123bpggscmn6z8m",
+            "email": "test.user5@email.com",
+            "first_name": "Test5",
+            "last_name": "User5",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-1231uysivaz6ipi",
+            "email": "test.user6@email.com",
+            "first_name": "Test6",
+            "last_name": "User6",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123b0a1wu0q6cm4",
+            "email": "test.user7@email.com",
+            "first_name": "Test7",
+            "last_name": "User7",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123bz6n6si24t6v",
+            "email": "test.user8@email.com",
+            "first_name": "Test8",
+            "last_name": "User8",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123lxhzifligheu",
+            "email": "test.user9@email.com",
+            "first_name": "Test9",
+            "last_name": "User9",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123gaz31qbopewh",
+            "email": "test.user10@email.com",
+            "first_name": "Test10",
+            "last_name": "User10",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-123dmcf65z973uo",
+            "email": "test.user11@email.com",
+            "first_name": "Test11",
+            "last_name": "User11",
+            "is_active": true
+        },
+        {
+            "kind": "arvados#user",
+            "uuid": "ardev-tpzed-1239y3lj7ybpyg8",
+            "email": "test.user12@email.com",
+            "first_name": "Test12",
+            "last_name": "User12",
+            "is_active": true
+        }
+
+    ],
+    "items_available": 13
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/users-system.json
new file mode 100644 (file)
index 0000000..38441c5
--- /dev/null
@@ -0,0 +1,24 @@
+{
+    "href": "/users/ardev-tpzed-000000000000000",
+    "kind": "arvados#user",
+    "etag": "2ehmra38iwfuexvz1cjno5xua",
+    "uuid": "ardev-tpzed-000000000000000",
+    "owner_uuid": "ardev-tpzed-000000000000000",
+    "created_at": "2016-10-19T07:48:04.838534000Z",
+    "modified_by_client_uuid": null,
+    "modified_by_user_uuid": "ardev-tpzed-000000000000000",
+    "modified_at": "2016-10-19T07:48:04.833164000Z",
+    "email": "root",
+    "username": null,
+    "full_name": "root",
+    "first_name": "root",
+    "last_name": "",
+    "identity_url": null,
+    "is_active": true,
+    "is_admin": true,
+    "is_invited": true,
+    "prefs": {},
+    "writable_by": [
+        "ardev-tpzed-000000000000000"
+    ]
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/selfsigned.keystore.jks b/sdk/java-v2/src/test/resources/selfsigned.keystore.jks
new file mode 100644 (file)
index 0000000..86b126a
Binary files /dev/null and b/sdk/java-v2/src/test/resources/selfsigned.keystore.jks differ
diff --git a/sdk/java-v2/test-in-docker.sh b/sdk/java-v2/test-in-docker.sh
new file mode 100755 (executable)
index 0000000..c685005
--- /dev/null
@@ -0,0 +1,48 @@
+#!/bin/bash -x
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+#
+set -e
+
+format_last_commit_here() {
+    local format="$1"; shift
+    TZ=UTC git log -n1 --first-parent "--format=format:$format" .
+}
+
+version_from_git() {
+    # Output the version being built, or if we're building a
+    # dev/prerelease, output a version number based on the git log for
+    # the current working directory.
+    if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
+        echo "$ARVADOS_BUILDING_VERSION"
+        return
+    fi
+
+    local git_ts git_hash prefix
+    if [[ -n "$1" ]] ; then
+        prefix="$1"
+    else
+        prefix="0.1"
+    fi
+
+    declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
+    ARVADOS_BUILDING_VERSION="$(git describe --abbrev=0).$(date -ud "@$git_ts" +%Y%m%d%H%M%S)"
+    echo "$ARVADOS_BUILDING_VERSION"
+} 
+
+nohash_version_from_git() {
+    version_from_git $1 | cut -d. -f1-3
+}
+
+timestamp_from_git() {
+    format_last_commit_here "%ct"
+}
+if [[ -n "$1" ]]; then
+    build_version="$1"
+else
+    build_version="$(version_from_git)"
+fi
+#UID=$(id -u) # UID is read-only on many systems
+exec docker run --rm --user $UID -v $PWD:$PWD -w $PWD gradle /bin/sh -c 'gradle clean && gradle test && gradle jar install '"$gradle_upload"
\ No newline at end of file
index b613e97a10133adf5be8160057d66b31ed3a84a6..77e3c75af28ad37531cd7ea3eb186fb149a3c679 100644 (file)
@@ -53,8 +53,6 @@ class ApplicationController < ActionController::Base
   before_action(:render_404_if_no_object,
                 except: [:index, :create] + ERROR_ACTIONS)
 
-  theme Rails.configuration.arvados_theme
-
   attr_writer :resource_attrs
 
   begin
@@ -83,14 +81,11 @@ class ApplicationController < ActionController::Base
 
   def default_url_options
     options = {}
-    if Rails.configuration.host
-      options[:host] = Rails.configuration.host
-    end
-    if Rails.configuration.port
-      options[:port] = Rails.configuration.port
-    end
-    if Rails.configuration.protocol
-      options[:protocol] = Rails.configuration.protocol
+    if Rails.configuration.Services.Controller.ExternalURL != URI("")
+      exturl = Rails.configuration.Services.Controller.ExternalURL
+      options[:host] = exturl.host
+      options[:port] = exturl.port
+      options[:protocol] = exturl.scheme
     end
     options
   end
@@ -306,7 +301,7 @@ class ApplicationController < ActionController::Base
       limit_query.each do |record|
         new_limit += 1
         read_total += record.read_length.to_i
-        if read_total >= Rails.configuration.max_index_database_read
+        if read_total >= Rails.configuration.API.MaxIndexDatabaseRead
           new_limit -= 1 if new_limit > 1
           @limit = new_limit
           break
@@ -419,8 +414,7 @@ class ApplicationController < ActionController::Base
   end
 
   def disable_api_methods
-    if Rails.configuration.disable_api_methods.
-        include?(controller_name + "." + action_name)
+    if Rails.configuration.API.DisabledAPIs.include?(controller_name + "." + action_name)
       send_error("Disabled", status: 404)
     end
   end
index f7db1ef121f606b597772ce695aadb859f0a3e31..1004f070215e7110c22e6aad7ced317942eb8adf 100644 (file)
@@ -191,7 +191,7 @@ class Arvados::V1::GroupsController < ApplicationController
 
     table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
 
-    disabled_methods = Rails.configuration.disable_api_methods
+    disabled_methods = Rails.configuration.API.DisabledAPIs
     avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')}
     klasses = avail_klasses.keys
 
index c12bc6e90c8f8bf68ce21b99f8bc658df1cf3510..6c3822437607ae0fd6dce94a7e20ea8731c04232 100644 (file)
@@ -19,7 +19,7 @@ class Arvados::V1::HealthcheckController < ApplicationController
     mgmt_token = Rails.configuration.ManagementToken
     auth_header = request.headers['Authorization']
 
-    if !mgmt_token
+    if mgmt_token == ""
       send_json ({"errors" => "disabled"}), status: 404
     elsif !auth_header
       send_json ({"errors" => "authorization required"}), status: 401
index 8ff2a97c467d0f09f867e0992bcb4167c9953e7d..13e47f76cdf88b17c2ee659dafce1417678ab2ee 100644 (file)
@@ -25,6 +25,8 @@ class Arvados::V1::SchemaController < ApplicationController
   def discovery_doc
     Rails.cache.fetch 'arvados_v1_rest_discovery' do
       Rails.application.eager_load!
+      remoteHosts = {}
+      Rails.configuration.RemoteClusters.each {|k,v| if k != "*" then remoteHosts[k] = v["Host"] end }
       discovery = {
         kind: "discovery#restDescription",
         discoveryVersion: "v1",
@@ -39,41 +41,34 @@ class Arvados::V1::SchemaController < ApplicationController
         title: "Arvados API",
         description: "The API to interact with Arvados.",
         documentationLink: "http://doc.arvados.org/api/index.html",
-        defaultCollectionReplication: Rails.configuration.default_collection_replication,
+        defaultCollectionReplication: Rails.configuration.Collections.DefaultReplication,
         protocol: "rest",
         baseUrl: root_url + "arvados/v1/",
         basePath: "/arvados/v1/",
         rootUrl: root_url,
         servicePath: "arvados/v1/",
         batchPath: "batch",
-        uuidPrefix: Rails.application.config.uuid_prefix,
-        defaultTrashLifetime: Rails.application.config.default_trash_lifetime,
-        blobSignatureTtl: Rails.application.config.blob_signature_ttl,
-        maxRequestSize: Rails.application.config.max_request_size,
-        maxItemsPerResponse: Rails.application.config.max_items_per_response,
-        dockerImageFormats: Rails.application.config.docker_image_formats,
-        crunchLogBytesPerEvent: Rails.application.config.crunch_log_bytes_per_event,
-        crunchLogSecondsBetweenEvents: Rails.application.config.crunch_log_seconds_between_events,
-        crunchLogThrottlePeriod: Rails.application.config.crunch_log_throttle_period,
-        crunchLogThrottleBytes: Rails.application.config.crunch_log_throttle_bytes,
-        crunchLogThrottleLines: Rails.application.config.crunch_log_throttle_lines,
-        crunchLimitLogBytesPerJob: Rails.application.config.crunch_limit_log_bytes_per_job,
-        crunchLogPartialLineThrottlePeriod: Rails.application.config.crunch_log_partial_line_throttle_period,
-        crunchLogUpdatePeriod: Rails.application.config.crunch_log_update_period,
-        crunchLogUpdateSize: Rails.application.config.crunch_log_update_size,
-        remoteHosts: Rails.configuration.remote_hosts,
-        remoteHostsViaDNS: Rails.configuration.remote_hosts_via_dns,
-        websocketUrl: Rails.application.config.websocket_address,
-        workbenchUrl: Rails.application.config.workbench_address,
-        keepWebServiceUrl: Rails.application.config.keep_web_service_url,
-        gitUrl: case Rails.application.config.git_repo_https_base
-                when false
-                  ''
-                when true
-                  'https://git.%s.arvadosapi.com/' % Rails.configuration.uuid_prefix
-                else
-                  Rails.application.config.git_repo_https_base
-                end,
+        uuidPrefix: Rails.configuration.ClusterID,
+        defaultTrashLifetime: Rails.configuration.Collections.DefaultTrashLifetime,
+        blobSignatureTtl: Rails.configuration.Collections.BlobSigningTTL,
+        maxRequestSize: Rails.configuration.API.MaxRequestSize,
+        maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse,
+        dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats,
+        crunchLogBytesPerEvent: Rails.configuration.Containers.Logging.LogBytesPerEvent,
+        crunchLogSecondsBetweenEvents: Rails.configuration.Containers.Logging.LogSecondsBetweenEvents,
+        crunchLogThrottlePeriod: Rails.configuration.Containers.Logging.LogThrottlePeriod,
+        crunchLogThrottleBytes: Rails.configuration.Containers.Logging.LogThrottleBytes,
+        crunchLogThrottleLines: Rails.configuration.Containers.Logging.LogThrottleLines,
+        crunchLimitLogBytesPerJob: Rails.configuration.Containers.Logging.LimitLogBytesPerJob,
+        crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod,
+        crunchLogUpdatePeriod: Rails.configuration.Containers.Logging.LogUpdatePeriod,
+        crunchLogUpdateSize: Rails.configuration.Containers.Logging.LogUpdateSize,
+        remoteHosts: remoteHosts,
+        remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"].Proxy,
+        websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s,
+        workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,
+        keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,
+        gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s,
         parameters: {
           alt: {
             type: "string",
@@ -405,7 +400,7 @@ class Arvados::V1::SchemaController < ApplicationController
           end
         end
       end
-      Rails.configuration.disable_api_methods.each do |method|
+      Rails.configuration.API.DisabledAPIs.each do |method|
         ctrl, action = method.split('.', 2)
         discovery[:resources][ctrl][:methods].delete(action.to_sym)
       end
index b421f54596f9f5274eef4f943fb4ab09a2f96321..4b2b985e023679b783bb358543eac8d7c8f54829 100644 (file)
@@ -12,8 +12,8 @@ class StaticController < ApplicationController
   def home
     respond_to do |f|
       f.html do
-        if Rails.configuration.workbench_address
-          redirect_to Rails.configuration.workbench_address
+        if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.empty?
+          redirect_to Rails.configuration.Services.Workbench1.ExternalURL.to_s
         else
           render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
         end
index 237156f1161ec53d53322cb3c9271c39ab945805..6e18cdd4607bb5aa6e5b49b608f1d15882891167 100644 (file)
@@ -52,7 +52,7 @@ class UserSessionsController < ApplicationController
                       :first_name => omniauth['info']['first_name'],
                       :last_name => omniauth['info']['last_name'],
                       :identity_url => omniauth['info']['identity_url'],
-                      :is_active => Rails.configuration.new_users_are_active,
+                      :is_active => Rails.configuration.Users.NewUsersAreActive,
                       :owner_uuid => system_user_uuid)
       if omniauth['info']['username']
         user.set_initial_username(requested: omniauth['info']['username'])
@@ -120,7 +120,7 @@ class UserSessionsController < ApplicationController
 
     flash[:notice] = 'You have logged off'
     return_to = params[:return_to] || root_url
-    redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+    redirect_to "#{Rails.configuration.Services.SSO.ExternalURL}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
   end
 
   # login - Just bounce to /auth/joshid. The only purpose of this function is
index 87a5699f49a2061a37fd2ca3117fc0bc23ce750e..45e329030f6bfd54e7aea559d011029ab932bdb1 100644 (file)
@@ -5,32 +5,32 @@
 class AdminNotifier < ActionMailer::Base
   include AbstractController::Callbacks
 
-  default from: Rails.configuration.admin_notifier_email_from
+  default from: Rails.configuration.Users.AdminNotifierEmailFrom
 
   def new_user(user)
     @user = user
-    if not Rails.configuration.new_user_notification_recipients.empty? then
-      @recipients = Rails.configuration.new_user_notification_recipients
+    if not Rails.configuration.Users.NewUserNotificationRecipients.empty? then
+      @recipients = Rails.configuration.Users.NewUserNotificationRecipients
       logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
 
       add_to_subject = ''
-      if Rails.configuration.auto_setup_new_users
+      if Rails.configuration.Users.AutoSetupNewUsers
         add_to_subject = @user.is_invited ? ' and setup' : ', but not setup'
       end
 
       mail(to: @recipients,
-           subject: "#{Rails.configuration.email_subject_prefix}New user created#{add_to_subject} notification"
+           subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New user created#{add_to_subject} notification"
           )
     end
   end
 
   def new_inactive_user(user)
     @user = user
-    if not Rails.configuration.new_inactive_user_notification_recipients.empty? then
-      @recipients = Rails.configuration.new_inactive_user_notification_recipients
+    if not Rails.configuration.Users.NewInactiveUserNotificationRecipients.empty? then
+      @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients
       logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
       mail(to: @recipients,
-           subject: "#{Rails.configuration.email_subject_prefix}New inactive user notification"
+           subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification"
           )
     end
   end
index 8c0c5ec863bccf5b91893cc7ef3bfbf5c3c42edd..849eefe8e1b66bfa7f14eb0c0a158131fbacbb07 100644 (file)
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class ProfileNotifier < ActionMailer::Base
-  default from: Rails.configuration.admin_notifier_email_from
+  default from: Rails.configuration.Users.AdminNotifierEmailFrom
 
   def profile_created(user, address)
     @user = user
index 5fb7036bf2596acb8425c2e6695b8a63ad259da8..3d1b91f20eda0b3b75849dd7efca504d9059b080 100644 (file)
@@ -5,7 +5,7 @@
 class UserNotifier < ActionMailer::Base
   include AbstractController::Callbacks
 
-  default from: Rails.configuration.user_notifier_email_from
+  default from: Rails.configuration.Users.UserNotifierEmailFrom
 
   def account_is_setup(user)
     @user = user
index 38538cb4ffbe8d6db29fcc430cc67620f25641b4..7645d1597ca726579dd91ead5285a9f0253c3873 100644 (file)
@@ -87,14 +87,14 @@ class ApiClientAuthorization < ArvadosModel
   end
 
   def self.remote_host(uuid_prefix:)
-    Rails.configuration.remote_hosts[uuid_prefix] ||
-      (Rails.configuration.remote_hosts_via_dns &&
+    (Rails.configuration.RemoteClusters[uuid_prefix].andand.Host) ||
+      (Rails.configuration.RemoteClusters["*"].Proxy &&
        uuid_prefix+".arvadosapi.com")
   end
 
   def self.validate(token:, remote: nil)
     return nil if !token
-    remote ||= Rails.configuration.uuid_prefix
+    remote ||= Rails.configuration.ClusterID
 
     case token[0..2]
     when 'v2/'
@@ -134,7 +134,7 @@ class ApiClientAuthorization < ArvadosModel
       end
 
       uuid_prefix = uuid[0..4]
-      if uuid_prefix == Rails.configuration.uuid_prefix
+      if uuid_prefix == Rails.configuration.ClusterID
         # If the token were valid, we would have validated it above
         return nil
       elsif uuid_prefix.length != 5
@@ -153,7 +153,7 @@ class ApiClientAuthorization < ArvadosModel
       # [re]validate it.
       begin
         clnt = HTTPClient.new
-        if Rails.configuration.sso_insecure
+        if Rails.configuration.TLS.Insecure
           clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
         else
           # Use system CA certificates
@@ -164,7 +164,7 @@ class ApiClientAuthorization < ArvadosModel
         end
         remote_user = SafeJSON.load(
           clnt.get_content('https://' + host + '/arvados/v1/users/current',
-                           {'remote' => Rails.configuration.uuid_prefix},
+                           {'remote' => Rails.configuration.ClusterID},
                            {'Authorization' => 'Bearer ' + token}))
       rescue => e
         Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
@@ -187,8 +187,8 @@ class ApiClientAuthorization < ArvadosModel
           end
         end
 
-        if Rails.configuration.new_users_are_active ||
-           Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
+        if Rails.configuration.Users.NewUsersAreActive ||
+           Rails.configuration.RemoteClusters[remote_user['uuid'][0..4]].andand["ActivateUsers"]
           # Update is_active to whatever it is at the remote end
           user.is_active = remote_user['is_active']
         elsif !remote_user['is_active']
index e619abe8c800781b17fc3c2c07f8fa69008ee6cf..339bc9e23fdaf2334b060fda789e97130772be6a 100644 (file)
@@ -411,7 +411,7 @@ class ArvadosModel < ApplicationRecord
   end
 
   def logged_attributes
-    attributes.except(*Rails.configuration.unlogged_attributes)
+    attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes)
   end
 
   def self.full_text_searchable_columns
@@ -735,7 +735,7 @@ class ArvadosModel < ApplicationRecord
   end
 
   def self.uuid_like_pattern
-    "#{Rails.configuration.uuid_prefix}-#{uuid_prefix}-_______________"
+    "#{Rails.configuration.ClusterID}-#{uuid_prefix}-_______________"
   end
 
   def self.uuid_regex
@@ -814,8 +814,8 @@ class ArvadosModel < ApplicationRecord
   end
 
   def is_audit_logging_enabled?
-    return !(Rails.configuration.max_audit_log_age.to_i == 0 &&
-             Rails.configuration.max_audit_log_delete_batch.to_i > 0)
+    return !(Rails.configuration.AuditLogs.MaxAge.to_i == 0 &&
+             Rails.configuration.AuditLogs.MaxDeleteBatch.to_i > 0)
   end
 
   def log_start_state
index 55a257856c989faa18bc70dfe075cc7d534b90e1..54a4f369d9a058666c826cdd1a19e272962f8f3e 100644 (file)
@@ -51,15 +51,15 @@ class Blob
       timestamp = opts[:expire]
     else
       timestamp = db_current_time.to_i +
-        (opts[:ttl] || Rails.configuration.blob_signature_ttl)
+        (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL)
     end
     timestamp_hex = timestamp.to_s(16)
     # => "53163cb4"
-    blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
 
     # Generate a signature.
     signature =
-      generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+      generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),
                          blob_hash, opts[:api_token], timestamp_hex, blob_signature_ttl)
 
     blob_locator + '+A' + signature + '@' + timestamp_hex
@@ -103,10 +103,10 @@ class Blob
     if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)
       raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
     end
-    blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
 
     my_signature =
-      generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+      generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),
                          blob_hash, opts[:api_token], timestamp, blob_signature_ttl)
 
     if my_signature != given_signature
index 590228b1af354f0f10bad06171706f4ed88c05fa..e0f653969948d6187865229be31b02f5e31a5294 100644 (file)
@@ -125,7 +125,7 @@ class Collection < ArvadosModel
             # Signature provided, but verify_signature did not like it.
             logger.warn "Invalid signature on locator #{tok}"
             raise ArvadosModel::PermissionDeniedError
-          elsif Rails.configuration.permit_create_collection_with_unsigned_manifest
+          elsif !Rails.configuration.Collections.BlobSigning
             # No signature provided, but we are running in insecure mode.
             logger.debug "Missing signature on locator #{tok} ignored"
           elsif Blob.new(tok).empty?
@@ -323,9 +323,9 @@ class Collection < ArvadosModel
   end
 
   def should_preserve_version?
-    return false unless (Rails.configuration.collection_versioning && versionable_updates?(self.changes.keys))
+    return false unless (Rails.configuration.Collections.CollectionVersioning && versionable_updates?(self.changes.keys))
 
-    idle_threshold = Rails.configuration.preserve_version_if_idle
+    idle_threshold = Rails.configuration.Collections.PreserveVersionIfIdle
     if !self.preserve_version_was &&
       (idle_threshold < 0 ||
         (idle_threshold > 0 && self.modified_at_was > db_current_time-idle_threshold.seconds))
@@ -371,7 +371,7 @@ class Collection < ArvadosModel
       return manifest_text
     else
       token = Thread.current[:token]
-      exp = [db_current_time.to_i + Rails.configuration.blob_signature_ttl,
+      exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL,
              trash_at].compact.map(&:to_i).min
       self.class.sign_manifest manifest_text, token, exp
     end
@@ -379,7 +379,7 @@ class Collection < ArvadosModel
 
   def self.sign_manifest manifest, token, exp=nil
     if exp.nil?
-      exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+      exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
     end
     signing_opts = {
       api_token: token,
@@ -489,7 +489,7 @@ class Collection < ArvadosModel
   #
   # If filter_compatible_format is true (the default), only return image
   # collections which are support by the installation as indicated by
-  # Rails.configuration.docker_image_formats.  Will follow
+  # Rails.configuration.Containers.SupportedDockerImageFormats.  Will follow
   # 'docker_image_migration' links if search_term resolves to an incompatible
   # image, but an equivalent compatible image is available.
   def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil, filter_compatible_format: true)
@@ -500,15 +500,17 @@ class Collection < ArvadosModel
       joins("JOIN collections ON links.head_uuid = collections.uuid").
       order("links.created_at DESC")
 
-    if (Rails.configuration.docker_image_formats.include? 'v1' and
-        Rails.configuration.docker_image_formats.include? 'v2') or filter_compatible_format == false
+    docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats
+
+    if (docker_image_formats.include? 'v1' and
+        docker_image_formats.include? 'v2') or filter_compatible_format == false
       pattern = /^(sha256:)?[0-9A-Fa-f]{64}\.tar$/
-    elsif Rails.configuration.docker_image_formats.include? 'v2'
+    elsif docker_image_formats.include? 'v2'
       pattern = /^(sha256:)[0-9A-Fa-f]{64}\.tar$/
-    elsif Rails.configuration.docker_image_formats.include? 'v1'
+    elsif docker_image_formats.include? 'v1'
       pattern = /^[0-9A-Fa-f]{64}\.tar$/
     else
-      raise "Unrecognized configuration for docker_image_formats #{Rails.configuration.docker_image_formats}"
+      raise "Unrecognized configuration for docker_image_formats #{docker_image_formats}"
     end
 
     # If the search term is a Collection locator that contains one file
@@ -516,7 +518,9 @@ class Collection < ArvadosModel
     if loc = Keep::Locator.parse(search_term)
       loc.strip_hints!
       coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1)
-      if coll_match.any? or Rails.configuration.remote_hosts.length == 0
+      rc = Rails.configuration.RemoteClusters.select{ |k|
+        k != :"*" && k != Rails.configuration.ClusterID}
+      if coll_match.any? or rc.length == 0
         return get_compatible_images(readers, pattern, coll_match)
       else
         # Allow bare pdh that doesn't exist in the local database so
index 921c690cd00f78f6fc2b46bbace23fff89992db8..a3cef64212ba04122b28148482a03c3b431470cb 100644 (file)
@@ -148,7 +148,7 @@ class Commit < ActiveRecord::Base
     unless src_gitdir
       raise ArgumentError.new "no local repository for #{repo_name}"
     end
-    dst_gitdir = Rails.configuration.git_internal_dir
+    dst_gitdir = Rails.configuration.Containers.JobsAPI.GitInternalDir
 
     begin
       commit_in_dst = must_git(dst_gitdir, "log -n1 --format=%H #{sha1.shellescape}^{commit}").strip
index 3d5152c3ffeeea60618bf93fa2571653baae7c3e..59e8552f326dc59dc17bea621958084ad793e64a 100644 (file)
@@ -17,7 +17,7 @@ class CommitAncestor < ActiveRecord::Base
   protected
 
   def ask_git_whether_is
-    @gitdirbase = Rails.configuration.git_repositories_dir
+    @gitdirbase = Rails.configuration.Git.Repositories
     self.is = nil
     Dir.foreach @gitdirbase do |repo|
       next if repo.match(/^\./)
index fb900a993d464e809fd93ce0ecdacdae35995027..2bbdd0a07f45508a3515e8384fb9bca7e05a6817 100644 (file)
@@ -89,7 +89,8 @@ class Container < ArvadosModel
     nil => [Queued],
     Queued => [Locked, Cancelled],
     Locked => [Queued, Running, Cancelled],
-    Running => [Complete, Cancelled]
+    Running => [Complete, Cancelled],
+    Complete => [Cancelled]
   }
 
   def self.limit_index_columns_read
@@ -205,7 +206,7 @@ class Container < ArvadosModel
     rc = {}
     defaults = {
       'keep_cache_ram' =>
-      Rails.configuration.container_default_keep_cache_ram,
+      Rails.configuration.Containers.DefaultKeepCacheRAM,
     }
     defaults.merge(runtime_constraints).each do |k, v|
       if v.is_a? Array
@@ -368,7 +369,7 @@ class Container < ArvadosModel
     transaction do
       reload(lock: 'FOR UPDATE')
       check_unlock_fail
-      if self.lock_count < Rails.configuration.max_container_dispatch_attempts
+      if self.lock_count < Rails.configuration.Containers.MaxDispatchAttempts
         update_attributes!(state: Queued)
       else
         update_attributes!(state: Cancelled,
@@ -497,7 +498,7 @@ class Container < ArvadosModel
       return false
     end
 
-    if self.state == Running &&
+    if self.state_was == Running &&
        !current_api_client_authorization.nil? &&
        (current_api_client_authorization.uuid == self.auth_uuid ||
         current_api_client_authorization.token == self.runtime_token)
@@ -505,6 +506,8 @@ class Container < ArvadosModel
       # change priority or log.
       permitted.push *final_attrs
       permitted = permitted - [:log, :priority]
+    elsif !current_user.andand.is_admin
+      raise PermissionDeniedError
     elsif self.locked_by_uuid && self.locked_by_uuid != current_api_client_authorization.andand.uuid
       # When locked, progress fields cannot be updated by the wrong
       # dispatcher, even though it has admin privileges.
@@ -645,64 +648,76 @@ class Container < ArvadosModel
     # This container is finished so finalize any associated container requests
     # that are associated with this container.
     if self.state_changed? and self.final?
-      act_as_system_user do
-
-        if self.state == Cancelled
-          retryable_requests = ContainerRequest.where("container_uuid = ? and priority > 0 and state = 'Committed' and container_count < container_count_max", uuid)
-        else
-          retryable_requests = []
-        end
+      # These get wiped out by with_lock (which reloads the record),
+      # so record them now in case we need to schedule a retry.
+      prev_secret_mounts = self.secret_mounts_was
+      prev_runtime_token = self.runtime_token_was
+
+      # Need to take a lock on the container to ensure that any
+      # concurrent container requests that might try to reuse this
+      # container will block until the container completion
+      # transaction finishes.  This ensure that concurrent container
+      # requests that try to reuse this container are finalized (on
+      # Complete) or don't reuse it (on Cancelled).
+      self.with_lock do
+        act_as_system_user do
+          if self.state == Cancelled
+            retryable_requests = ContainerRequest.where("container_uuid = ? and priority > 0 and state = 'Committed' and container_count < container_count_max", uuid)
+          else
+            retryable_requests = []
+          end
 
-        if retryable_requests.any?
-          c_attrs = {
-            command: self.command,
-            cwd: self.cwd,
-            environment: self.environment,
-            output_path: self.output_path,
-            container_image: self.container_image,
-            mounts: self.mounts,
-            runtime_constraints: self.runtime_constraints,
-            scheduling_parameters: self.scheduling_parameters,
-            secret_mounts: self.secret_mounts_was,
-            runtime_token: self.runtime_token_was,
-            runtime_user_uuid: self.runtime_user_uuid,
-            runtime_auth_scopes: self.runtime_auth_scopes
-          }
-          c = Container.create! c_attrs
-          retryable_requests.each do |cr|
-            cr.with_lock do
-              leave_modified_by_user_alone do
-                # Use row locking because this increments container_count
-                cr.container_uuid = c.uuid
-                cr.save!
+          if retryable_requests.any?
+            c_attrs = {
+              command: self.command,
+              cwd: self.cwd,
+              environment: self.environment,
+              output_path: self.output_path,
+              container_image: self.container_image,
+              mounts: self.mounts,
+              runtime_constraints: self.runtime_constraints,
+              scheduling_parameters: self.scheduling_parameters,
+              secret_mounts: prev_secret_mounts,
+              runtime_token: prev_runtime_token,
+              runtime_user_uuid: self.runtime_user_uuid,
+              runtime_auth_scopes: self.runtime_auth_scopes
+            }
+            c = Container.create! c_attrs
+            retryable_requests.each do |cr|
+              cr.with_lock do
+                leave_modified_by_user_alone do
+                  # Use row locking because this increments container_count
+                  cr.container_uuid = c.uuid
+                  cr.save!
+                end
               end
             end
           end
-        end
 
-        # Notify container requests associated with this container
-        ContainerRequest.where(container_uuid: uuid,
-                               state: ContainerRequest::Committed).each do |cr|
-          leave_modified_by_user_alone do
-            cr.finalize!
+          # Notify container requests associated with this container
+          ContainerRequest.where(container_uuid: uuid,
+                                 state: ContainerRequest::Committed).each do |cr|
+            leave_modified_by_user_alone do
+              cr.finalize!
+            end
           end
-        end
 
-        # Cancel outstanding container requests made by this container.
-        ContainerRequest.
-          includes(:container).
-          where(requesting_container_uuid: uuid,
-                state: ContainerRequest::Committed).each do |cr|
-          leave_modified_by_user_alone do
-            cr.update_attributes!(priority: 0)
-            cr.container.reload
-            if cr.container.state == Container::Queued || cr.container.state == Container::Locked
-              # If the child container hasn't started yet, finalize the
-              # child CR now instead of leaving it "on hold", i.e.,
-              # Queued with priority 0.  (OTOH, if the child is already
-              # running, leave it alone so it can get cancelled the
-              # usual way, get a copy of the log collection, etc.)
-              cr.update_attributes!(state: ContainerRequest::Final)
+          # Cancel outstanding container requests made by this container.
+          ContainerRequest.
+            includes(:container).
+            where(requesting_container_uuid: uuid,
+                  state: ContainerRequest::Committed).each do |cr|
+            leave_modified_by_user_alone do
+              cr.update_attributes!(priority: 0)
+              cr.container.reload
+              if cr.container.state == Container::Queued || cr.container.state == Container::Locked
+                # If the child container hasn't started yet, finalize the
+                # child CR now instead of leaving it "on hold", i.e.,
+                # Queued with priority 0.  (OTOH, if the child is already
+                # running, leave it alone so it can get cancelled the
+                # usual way, get a copy of the log collection, etc.)
+                cr.update_attributes!(state: ContainerRequest::Final)
+              end
             end
           end
         end
index 292decafbfb94ad381ab84bcfe01da13c5e9d68d..c412e4b8500c141617b18de64007078f7b715c4d 100644 (file)
@@ -119,13 +119,34 @@ class ContainerRequest < ArvadosModel
   end
 
   def finalize_if_needed
-    if state == Committed && Container.find_by_uuid(container_uuid).final?
-      reload
-      act_as_system_user do
-        leave_modified_by_user_alone do
-          finalize!
+    return if state != Committed
+    while true
+      # get container lock first, then lock current container request
+      # (same order as Container#handle_completed). Locking always
+      # reloads the Container and ContainerRequest records.
+      c = Container.find_by_uuid(container_uuid)
+      c.lock!
+      self.lock!
+
+      if container_uuid != c.uuid
+        # After locking, we've noticed a race, the container_uuid is
+        # different than the container record we just loaded.  This
+        # can happen if Container#handle_completed scheduled a new
+        # container for retry and set container_uuid while we were
+        # waiting on the container lock.  Restart the loop and get the
+        # new container.
+        redo
+      end
+
+      if state == Committed && c.final?
+          # The current container is final, so finalize this container request.
+        act_as_system_user do
+          leave_modified_by_user_alone do
+            finalize!
+          end
         end
       end
+      return true
     end
   end
 
@@ -196,7 +217,7 @@ class ContainerRequest < ArvadosModel
     self.mounts ||= {}
     self.secret_mounts ||= {}
     self.cwd ||= "."
-    self.container_count_max ||= Rails.configuration.container_count_max
+    self.container_count_max ||= Rails.configuration.Containers.MaxRetryAttempts
     self.scheduling_parameters ||= {}
     self.output_ttl ||= 0
     self.priority ||= 0
@@ -210,7 +231,18 @@ class ContainerRequest < ArvadosModel
       return false
     end
     if state_changed? and state == Committed and container_uuid.nil?
-      self.container_uuid = Container.resolve(self).uuid
+      while true
+        c = Container.resolve(self)
+        c.lock!
+        if c.state == Container::Cancelled
+          # Lost a race, we have a lock on the container but the
+          # container was cancelled in a different request, restart
+          # the loop and resolve request to a new container.
+          redo
+        end
+        self.container_uuid = c.uuid
+        break
+      end
     end
     if self.container_uuid != self.container_uuid_was
       if self.container_count_changed?
@@ -252,7 +284,7 @@ class ContainerRequest < ArvadosModel
     if self.state == Committed
       # If preemptible instances (eg: AWS Spot Instances) are allowed,
       # ask them on child containers by default.
-      if Rails.configuration.preemptible_instances and !c.nil? and
+      if Rails.configuration.Containers.UsePreemptibleInstances and !c.nil? and
         self.scheduling_parameters['preemptible'].nil?
           self.scheduling_parameters['preemptible'] = true
       end
@@ -322,7 +354,7 @@ class ContainerRequest < ArvadosModel
             scheduling_parameters['partitions'].size)
             errors.add :scheduling_parameters, "partitions must be an array of strings"
       end
-      if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible']
+      if !Rails.configuration.Containers.UsePreemptibleInstances and scheduling_parameters['preemptible']
         errors.add :scheduling_parameters, "preemptible instances are not allowed"
       end
       if scheduling_parameters.include? 'max_run_time' and
index 420386cdc2ac0b4cfa423bb423e9c6053ec8b4ba..4d63deb99cd1d236b348996228c0d3b036416e6e 100644 (file)
@@ -287,7 +287,7 @@ class Job < ArvadosModel
         log_reuse_info { "job #{j.uuid} has nil output" }
       elsif j.log.nil?
         log_reuse_info { "job #{j.uuid} has nil log" }
-      elsif Rails.configuration.reuse_job_if_outputs_differ
+      elsif Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer
         if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
           # Ignore: keep looking for an incomplete job or one whose
           # output is readable.
@@ -491,9 +491,9 @@ class Job < ArvadosModel
   end
 
   def find_docker_image_locator
-    if runtime_constraints.is_a? Hash
+    if runtime_constraints.is_a? Hash and Rails.configuration.Containers.JobsAPI.DefaultDockerImage != ""
       runtime_constraints['docker_image'] ||=
-        Rails.configuration.default_docker_image_for_jobs
+        Rails.configuration.Containers.JobsAPI.DefaultDockerImage
     end
 
     resolve_runtime_constraint("docker_image",
@@ -569,7 +569,7 @@ class Job < ArvadosModel
 
   def trigger_crunch_dispatch_if_cancelled
     if @need_crunch_dispatch_trigger
-      File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
+      File.open(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger, 'wb') do
         # That's all, just create/touch a file for crunch-job to see.
       end
     end
index 148dffc23074138af0d70008e2cc49dd8b344ca1..044d83c287969cbcc61f7cec7724b8be630037de 100644 (file)
@@ -39,7 +39,7 @@ class Node < ArvadosModel
   api_accessible :superuser, :extend => :user do |t|
     t.add :first_ping_at
     t.add :info
-    t.add lambda { |x| Rails.configuration.compute_node_nameservers }, :as => :nameservers
+    t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers }, :as => :nameservers
   end
 
   after_initialize do
@@ -47,7 +47,7 @@ class Node < ArvadosModel
   end
 
   def domain
-    super || Rails.configuration.compute_node_domain
+    super || Rails.configuration.Containers.SLURM.Managed.ComputeNodeDomain
   end
 
   def api_job_uuid
@@ -143,7 +143,7 @@ class Node < ArvadosModel
   protected
 
   def assign_hostname
-    if self.hostname.nil? and Rails.configuration.assign_node_hostname
+    if self.hostname.nil? and Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname
       self.hostname = self.class.hostname_for_slot(self.slot_number)
     end
   end
@@ -159,7 +159,7 @@ class Node < ArvadosModel
                           # query label:
                           'Node.available_slot_number',
                           # [col_id, val] for $1 vars:
-                          [[nil, Rails.configuration.max_compute_nodes]],
+                          [[nil, Rails.configuration.Containers.MaxComputeVMs]],
                          ).rows.first.andand.first
   end
 
@@ -194,24 +194,25 @@ class Node < ArvadosModel
 
     template_vars = {
       hostname: hostname,
-      uuid_prefix: Rails.configuration.uuid_prefix,
+      uuid_prefix: Rails.configuration.ClusterID,
       ip_address: ip_address,
       ptr_domain: ptr_domain,
     }
 
-    if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template
+    if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
+        !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty?)
       tmpfile = nil
       begin
         begin
-          template = IO.read(Rails.configuration.dns_server_conf_template)
+          template = IO.read(Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate)
         rescue IOError, SystemCallError => e
-          logger.error "Reading #{Rails.configuration.dns_server_conf_template}: #{e.message}"
+          logger.error "Reading #{Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate}: #{e.message}"
           raise
         end
 
-        hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+        hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf"
         Tempfile.open(["#{hostname}-", ".conf.tmp"],
-                                 Rails.configuration.dns_server_conf_dir) do |f|
+                                 Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir) do |f|
           tmpfile = f.path
           f.puts template % template_vars
         end
@@ -227,20 +228,21 @@ class Node < ArvadosModel
       end
     end
 
-    if Rails.configuration.dns_server_update_command
-      cmd = Rails.configuration.dns_server_update_command % template_vars
+    if !Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand.empty?
+      cmd = Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand % template_vars
       if not system cmd
         logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}"
         ok = false
       end
     end
 
-    if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_reload_command
-      restartfile = File.join(Rails.configuration.dns_server_conf_dir, 'restart.txt')
+    if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
+        !Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand.to_s.empty?)
+      restartfile = File.join(Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, 'restart.txt')
       begin
         File.open(restartfile, 'w') do |f|
           # Typically, this is used to trigger a dns server restart
-          f.puts Rails.configuration.dns_server_reload_command
+          f.puts Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand
         end
       rescue IOError, SystemCallError => e
         logger.error "Unable to write #{restartfile}: #{e.message}"
@@ -252,7 +254,7 @@ class Node < ArvadosModel
   end
 
   def self.hostname_for_slot(slot_number)
-    config = Rails.configuration.assign_node_hostname
+    config = Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname
 
     return nil if !config
 
@@ -261,10 +263,13 @@ class Node < ArvadosModel
 
   # At startup, make sure all DNS entries exist.  Otherwise, slurmctld
   # will refuse to start.
-  if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template and Rails.configuration.assign_node_hostname
-    (0..Rails.configuration.max_compute_nodes-1).each do |slot_number|
+  if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
+      !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty? and
+      !Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname.empty?)
+
+    (0..Rails.configuration.Containers.MaxComputeVMs-1).each do |slot_number|
       hostname = hostname_for_slot(slot_number)
-      hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+      hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf"
       if !File.exist? hostfile
         n = Node.where(:slot_number => slot_number).first
         if n.nil? or n.ip_address.nil?
index 48655156c4c096fa406d3add1dcffa77b1707354..5e0e39f9bedb090d7b566b3ddf9389c8f10f95dd 100644 (file)
@@ -49,7 +49,7 @@ class Repository < ArvadosModel
     # prefers bare repositories over checkouts.
     [["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args|
       [:uuid, :name].each do |path_attr|
-        git_dir = File.join(Rails.configuration.git_repositories_dir,
+        git_dir = File.join(Rails.configuration.Git.Repositories,
                             repo_base % send(path_attr), *join_args)
         return git_dir if File.exist?(git_dir)
       end
@@ -98,22 +98,27 @@ class Repository < ArvadosModel
   end
 
   def ssh_clone_url
-    _clone_url :git_repo_ssh_base, 'git@git.%s.arvadosapi.com:'
+    _clone_url Rails.configuration.Services.GitSSH.andand.ExternalURL, 'ssh://git@git.%s.arvadosapi.com'
   end
 
   def https_clone_url
-    _clone_url :git_repo_https_base, 'https://git.%s.arvadosapi.com/'
+    _clone_url Rails.configuration.Services.GitHTTP.andand.ExternalURL, 'https://git.%s.arvadosapi.com/'
   end
 
   def _clone_url config_var, default_base_fmt
-    configured_base = Rails.configuration.send config_var
-    return nil if configured_base == false
-    prefix = new_record? ? Rails.configuration.uuid_prefix : uuid[0,5]
-    if prefix == Rails.configuration.uuid_prefix and configured_base != true
-      base = configured_base
+    if not config_var
+      return ""
+    end
+    prefix = new_record? ? Rails.configuration.ClusterID : uuid[0,5]
+    if prefix == Rails.configuration.ClusterID and config_var != URI("")
+      base = config_var
+    else
+      base = URI(default_base_fmt % prefix)
+    end
+    if base.scheme == "ssh"
+      '%s@%s:%s.git' % [base.user, base.host, name]
     else
-      base = default_base_fmt % prefix
+      '%s%s.git' % [base, name]
     end
-    '%s%s.git' % [base, name]
   end
 end
index de85cc5a8e75e32852407f9c8388777df1168119..989a975924c1bedaa143b75fea3ca830157118d2 100644 (file)
@@ -34,7 +34,7 @@ class User < ArvadosModel
   after_create :add_system_group_permission_link
   after_create :invalidate_permissions_cache
   after_create :auto_setup_new_user, :if => Proc.new { |user|
-    Rails.configuration.auto_setup_new_users and
+    Rails.configuration.Users.AutoSetupNewUsers and
     (user.uuid != system_user_uuid) and
     (user.uuid != anonymous_user_uuid)
   }
@@ -81,7 +81,7 @@ class User < ArvadosModel
 
   def is_invited
     !!(self.is_active ||
-       Rails.configuration.new_users_are_active ||
+       Rails.configuration.Users.NewUsersAreActive ||
        self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)
   end
 
@@ -358,15 +358,15 @@ class User < ArvadosModel
     current_user.andand.is_admin or
       (self == current_user &&
        self.redirect_to_user_uuid.nil? &&
-       self.is_active == Rails.configuration.new_users_are_active)
+       self.is_active == Rails.configuration.Users.NewUsersAreActive)
   end
 
   def check_auto_admin
     return if self.uuid.end_with?('anonymouspublic')
     if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and
-        Rails.configuration.auto_admin_user and self.email == Rails.configuration.auto_admin_user) or
+        !Rails.configuration.Users.AutoAdminUserWithEmail.empty? and self.email == Rails.configuration.Users["AutoAdminUserWithEmail"]) or
        (User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and
-        Rails.configuration.auto_admin_first_user)
+        Rails.configuration.Users.AutoAdminFirstUser)
       self.is_admin = true
       self.is_active = true
     end
@@ -381,7 +381,7 @@ class User < ArvadosModel
     quoted_name = self.class.connection.quote_string(basename)
     next_username = basename
     next_suffix = 1
-    while Rails.configuration.auto_setup_name_blacklist.include?(next_username)
+    while Rails.configuration.Users.AutoSetupUsernameBlacklist.include?(next_username)
       next_suffix += 1
       next_username = "%s%i" % [basename, next_suffix]
     end
@@ -493,7 +493,7 @@ class User < ArvadosModel
   # create login permission for the given vm_uuid, if it does not already exist
   def create_vm_login_permission_link(vm_uuid, repo_name)
     # vm uuid is optional
-    return if !vm_uuid
+    return if vm_uuid == ""
 
     vm = VirtualMachine.where(uuid: vm_uuid).first
     if !vm
@@ -563,10 +563,10 @@ class User < ArvadosModel
   def auto_setup_new_user
     setup(openid_prefix: Rails.configuration.default_openid_prefix)
     if username
-      create_vm_login_permission_link(Rails.configuration.auto_setup_new_users_with_vm_uuid,
+      create_vm_login_permission_link(Rails.configuration.Users.AutoSetupNewUsersWithVmUUID,
                                       username)
       repo_name = "#{username}/#{username}"
-      if Rails.configuration.auto_setup_new_users_with_repository and
+      if Rails.configuration.Users.AutoSetupNewUsersWithRepository and
           Repository.where(name: repo_name).first.nil?
         repo = Repository.create!(name: repo_name, owner_uuid: uuid)
         Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
@@ -579,7 +579,7 @@ class User < ArvadosModel
   def send_profile_created_notification
     if self.prefs_changed?
       if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
-        profile_notification_address = Rails.configuration.user_profile_notification_address
+        profile_notification_address = Rails.configuration.Users.UserProfileNotificationAddress
         ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address
       end
     end
index 097412c251f146723a4c680dafc98a7f7c73a5fa..afcf34da714e591066a7e5f706cbaa23d130db11 100644 (file)
@@ -7,10 +7,10 @@ A new user landed on the inactive user page:
 
   <%= @user.full_name %> <<%= @user.email %>>
 
-<% if Rails.configuration.workbench_address -%>
+<% if Rails.configuration.Services.Workbench1.ExternalURL -%>
 Please see workbench for more information:
 
-  <%= Rails.configuration.workbench_address %>
+  <%= Rails.configuration.Services.Workbench1.ExternalURL %>
 
 <% end -%>
 Thanks,
index d21513f7f02e44495070bf84225ede0b0bda7e1d..670b84b7c11dd874b1eaaf976cffe3b495633fab 100644 (file)
@@ -4,7 +4,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
 
 <%
   add_to_message = ''
-  if Rails.configuration.auto_setup_new_users
+  if Rails.configuration.Users.AutoSetupNewUsers
     add_to_message = @user.is_invited ? ' and setup' : ', but not setup'
   end
 %>
@@ -14,12 +14,11 @@ A new user has been created<%=add_to_message%>:
 
 This user is <%= @user.is_active ? '' : 'NOT ' %>active.
 
-<% if Rails.configuration.workbench_address -%>
+<% if Rails.configuration.Services.Workbench1.ExternalURL -%>
 Please see workbench for more information:
 
-  <%= Rails.configuration.workbench_address %>
+  <%= Rails.configuration.Services.Workbench1.ExternalURL %>
 
 <% end -%>
 Thanks,
 Your friendly Arvados robot.
-
index ca7082774d155d1ca1b62f70b12e6e097cfba2ab..50d164bfa1e8493cef0b9d733062f6cfb4c8fbb6 100644 (file)
@@ -8,9 +8,9 @@ SPDX-License-Identifier: AGPL-3.0 %>
 Hi there,
 <% end -%>
 
-Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.workbench_address %>at
+Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.Services.Workbench1.ExternalURL %>at
 
-  <%= Rails.configuration.workbench_address %><%= "/" if !Rails.configuration.workbench_address.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
+  <%= Rails.configuration.Services.Workbench1.ExternalURL %><%= "/" if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
 
 for connection instructions.
 
index 98443b428f5313c82c5182cf348eb410c6c99b7d..3ad3cac2b27e3f750c8607856d3092d72ff6e669 100644 (file)
 # 5. Section in application.default.yml called "common"
 
 common:
-  ###
-  ### Essential site configuration
-  ###
-
-  # The prefix used for all database identifiers to identify the record as
-  # originating from this site.  Must be exactly 5 alphanumeric characters
-  # (lowercase ASCII letters and digits).
-  uuid_prefix: ~
-
-  # secret_token is a string of alphanumeric characters used by Rails
-  # to sign session tokens. IMPORTANT: This is a site secret. It
-  # should be at least 50 characters.
-  secret_token: ~
-
-  # blob_signing_key is a string of alphanumeric characters used to
-  # generate permission signatures for Keep locators. It must be
-  # identical to the permission key given to Keep. IMPORTANT: This is
-  # a site secret. It should be at least 50 characters.
-  #
-  # Modifying blob_signing_key will invalidate all existing
-  # signatures, which can cause programs to fail (e.g., arv-put,
-  # arv-get, and Crunch jobs).  To avoid errors, rotate keys only when
-  # no such processes are running.
-  blob_signing_key: ~
-
-  # These settings are provided by your OAuth2 provider (e.g.,
-  # sso-provider).
-  sso_app_secret: ~
-  sso_app_id: ~
-  sso_provider_url: ~
-
-  # If this is not false, HTML requests at the API server's root URL
-  # are redirected to this location, and it is provided in the text of
-  # user activation notification email messages to remind them where
-  # to log in.
-  workbench_address: false
-
-  # Client-facing URI for websocket service. Nginx should be
-  # configured to proxy this URI to arvados-ws; see
-  # http://doc.arvados.org/install/install-ws.html
-  #
-  # If websocket_address is false (which is the default), no websocket
-  # server will be advertised to clients. This configuration is not
-  # supported.
-  #
-  # Example:
-  #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
-  websocket_address: false
-
-  # Maximum number of websocket connections allowed
-  websocket_max_connections: 500
-
-  # Maximum number of events a single connection can be backlogged
-  websocket_max_notify_backlog: 1000
-
-  # Maximum number of subscriptions a single websocket connection can have
-  # active.
-  websocket_max_filters: 10
-
-  # Git repositories must be readable by api server, or you won't be
-  # able to submit crunch jobs. To pass the test suites, put a clone
-  # of the arvados tree in {git_repositories_dir}/arvados.git or
-  # {git_repositories_dir}/arvados/.git
-  git_repositories_dir: /var/lib/arvados/git/repositories
-
-  # This is a (bare) repository that stores commits used in jobs.  When a job
-  # runs, the source commits are first fetched into this repository, then this
-  # repository is used to deploy to compute nodes.  This should NOT be a
-  # subdirectory of {git_repositiories_dir}.
-  git_internal_dir: /var/lib/arvados/internal.git
-
-  # Default replication level for collections. This is used when a
-  # collection's replication_desired attribute is nil.
-  default_collection_replication: 2
-
-
-  ###
-  ### Overriding default advertised hostnames/URLs
-  ###
-
-  # If not false, this is the hostname, port, and protocol that will be used
-  # for root_url and advertised in the discovery document.  By default, use
-  # the default Rails logic for deciding on a hostname.
-  host: false
-  port: false
-  protocol: false
-
-  # Base part of SSH git clone url given with repository resources. If
-  # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
-  # used. If false, SSH clone URLs are not advertised. Include a
-  # trailing ":" or "/" if needed: it will not be added automatically.
-  git_repo_ssh_base: true
-
-  # Base part of HTTPS git clone urls given with repository
-  # resources. This is expected to be an arv-git-httpd service which
-  # accepts API tokens as HTTP-auth passwords. If true, the default
-  # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
-  # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
-  # if needed: it will not be added automatically.
-  git_repo_https_base: true
-
-
-  ###
-  ### New user and & email settings
-  ###
-
-  # Config parameters to automatically setup new users.  If enabled,
-  # this users will be able to self-activate.  Enable this if you want
-  # to run an open instance where anyone can create an account and use
-  # the system without requiring manual approval.
-  #
-  # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
-  # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
-  auto_setup_new_users: false
-  auto_setup_new_users_with_vm_uuid: false
-  auto_setup_new_users_with_repository: false
-  auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
-
-  # When new_users_are_active is set to true, new users will be active
-  # immediately.  This skips the "self-activate" step which enforces
-  # user agreements.  Should only be enabled for development.
-  new_users_are_active: false
-
-  # The e-mail address of the user you would like to become marked as an admin
-  # user on their first login.
-  # In the default configuration, authentication happens through the Arvados SSO
-  # server, which uses OAuth2 against Google's servers, so in that case this
-  # should be an address associated with a Google account.
-  auto_admin_user: false
-
-  # If auto_admin_first_user is set to true, the first user to log in when no
-  # other admin users exist will automatically become an admin user.
-  auto_admin_first_user: false
-
-  # Email address to notify whenever a user creates a profile for the
-  # first time
-  user_profile_notification_address: false
-
-  admin_notifier_email_from: arvados@example.com
-  email_subject_prefix: "[ARVADOS] "
-  user_notifier_email_from: arvados@example.com
-  new_user_notification_recipients: [ ]
-  new_inactive_user_notification_recipients: [ ]
-
-
-  ###
-  ### Limits, timeouts and durations
-  ###
-
-  # Lifetime (in seconds) of blob permission signatures generated by
-  # the API server. This determines how long a client can take (after
-  # retrieving a collection record) to retrieve the collection data
-  # from Keep. If the client needs more time than that (assuming the
-  # collection still has the same content and the relevant user/token
-  # still has permission) the client can retrieve the collection again
-  # to get fresh signatures.
-  #
-  # This must be exactly equal to the -blob-signature-ttl flag used by
-  # keepstore servers.  Otherwise, reading data blocks and saving
-  # collections will fail with HTTP 403 permission errors.
-  #
-  # Modifying blob_signature_ttl invalidates existing signatures; see
-  # blob_signing_key note above.
-  #
-  # The default is 2 weeks.
-  blob_signature_ttl: 1209600
-
-  # Default lifetime for ephemeral collections: 2 weeks. This must not
-  # be less than blob_signature_ttl.
-  default_trash_lifetime: 1209600
-
-  # Interval (seconds) between trash sweeps. During a trash sweep,
-  # collections are marked as trash if their trash_at time has
-  # arrived, and deleted if their delete_at time has arrived.
-  trash_sweep_interval: 60
-
-  # Interval (seconds) between asynchronous permission view updates. Any
-  # permission-updating API called with the 'async' parameter schedules a an
-  # update on the permission view in the future, if not already scheduled.
-  async_permissions_update_interval: 20
-
-  # Maximum characters of (JSON-encoded) query parameters to include
-  # in each request log entry. When params exceed this size, they will
-  # be JSON-encoded, truncated to this size, and logged as
-  # params_truncated.
-  max_request_log_params_size: 2000
-
-  # Maximum size (in bytes) allowed for a single API request.  This
-  # limit is published in the discovery document for use by clients.
-  # Note: You must separately configure the upstream web server or
-  # proxy to actually enforce the desired maximum request size on the
-  # server side.
-  max_request_size: 134217728
-
-  # Limit the number of bytes read from the database during an index
-  # request (by retrieving and returning fewer rows than would
-  # normally be returned in a single response).
-  # Note 1: This setting never reduces the number of returned rows to
-  # zero, no matter how big the first data row is.
-  # Note 2: Currently, this is only checked against a specific set of
-  # columns that tend to get large (collections.manifest_text,
-  # containers.mounts, workflows.definition). Other fields (e.g.,
-  # "properties" hashes) are not counted against this limit.
-  max_index_database_read: 134217728
-
-  # Maximum number of items to return when responding to a APIs that
-  # can return partial result sets using limit and offset parameters
-  # (e.g., *.index, groups.contents). If a request specifies a "limit"
-  # parameter higher than this value, this value is used instead.
-  max_items_per_response: 1000
-
-  # When you run the db:delete_old_job_logs task, it will find jobs that
-  # have been finished for at least this many seconds, and delete their
-  # stderr logs from the logs table.
-  clean_job_log_rows_after: <%= 30.days %>
-
-  # When you run the db:delete_old_container_logs task, it will find
-  # containers that have been finished for at least this many seconds,
-  # and delete their stdout, stderr, arv-mount, crunch-run, and
-  # crunchstat logs from the logs table.
-  clean_container_log_rows_after: <%= 30.days %>
-
-  # Time to keep audit logs, in seconds. (An audit log is a row added
-  # to the "logs" table in the PostgreSQL database each time an
-  # Arvados object is created, modified, or deleted.)
-  #
-  # Currently, websocket event notifications rely on audit logs, so
-  # this should not be set lower than 600 (5 minutes).
-  max_audit_log_age: 1209600
-
-  # Maximum number of log rows to delete in a single SQL transaction.
-  #
-  # If max_audit_log_delete_batch is 0, log entries will never be
-  # deleted by Arvados. Cleanup can be done by an external process
-  # without affecting any Arvados system processes, as long as very
-  # recent (<5 minutes old) logs are not deleted.
-  #
-  # 100000 is a reasonable batch size for most sites.
-  max_audit_log_delete_batch: 0
-
-  # The maximum number of compute nodes that can be in use simultaneously
-  # If this limit is reduced, any existing nodes with slot number >= new limit
-  # will not be counted against the new limit. In other words, the new limit
-  # won't be strictly enforced until those nodes with higher slot numbers
-  # go down.
-  max_compute_nodes: 64
-
-  # These two settings control how frequently log events are flushed to the
-  # database.  Log lines are buffered until either crunch_log_bytes_per_event
-  # has been reached or crunch_log_seconds_between_events has elapsed since
-  # the last flush.
-  crunch_log_bytes_per_event: 4096
-  crunch_log_seconds_between_events: 1
-
-  # The sample period for throttling logs, in seconds.
-  crunch_log_throttle_period: 60
-
-  # Maximum number of bytes that job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_bytes: 65536
-
-  # Maximum number of lines that job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_lines: 1024
-
-  # Maximum bytes that may be logged by a single job.  Log bytes that are
-  # silenced by throttling are not counted against this total.
-  crunch_limit_log_bytes_per_job: 67108864
-
-  crunch_log_partial_line_throttle_period: 5
-
-  # Container logs are written to Keep and saved in a collection,
-  # which is updated periodically while the container runs.  This
-  # value sets the interval (given in seconds) between collection
-  # updates.
-  crunch_log_update_period: 1800
-
-  # The log collection is also updated when the specified amount of
-  # log data (given in bytes) is produced in less than one update
-  # period.
-  crunch_log_update_size: 33554432
-
-  # Attributes to suppress in events and audit logs.  Notably,
-  # specifying ["manifest_text"] here typically makes the database
-  # smaller and faster.
-  #
-  # Warning: Using any non-empty value here can have undesirable side
-  # effects for any client or component that relies on event logs.
-  # Use at your own risk.
-  unlogged_attributes: []
-
-  # API methods to disable. Disabled methods are not listed in the
-  # discovery document, and respond 404 to all requests.
-  # Example: ["jobs.create", "pipeline_instances.create"]
-  disable_api_methods: []
-
-  # Enable the legacy Jobs API.
-  # auto -- (default) enable the Jobs API only if it has been used before
-  #         (i.e., there are job records in the database)
-  # true -- enable the Jobs API despite lack of existing records.
-  # false -- disable the Jobs API despite presence of existing records.
-  enable_legacy_jobs_api: auto
-
-  ###
-  ### Crunch, DNS & compute node management
-  ###
-
-  # Preemptible instance support (e.g. AWS Spot Instances)
-  # When true, child containers will get created with the preemptible
-  # scheduling parameter parameter set.
-  preemptible_instances: false
-
-  # Docker image to be used when none found in runtime_constraints of a job
-  default_docker_image_for_jobs: false
-
-  # List of supported Docker Registry image formats that compute nodes
-  # are able to use. `arv keep docker` will error out if a user tries
-  # to store an image with an unsupported format. Use an empty array
-  # to skip the compatibility check (and display a warning message to
-  # that effect).
-  #
-  # Example for sites running docker < 1.10: ["v1"]
-  # Example for sites running docker >= 1.10: ["v2"]
-  # Example for disabling check: []
-  docker_image_formats: ["v2"]
-
-  # :none or :slurm_immediate
-  crunch_job_wrapper: :none
-
-  # username, or false = do not set uid when running jobs.
-  crunch_job_user: crunch
-
-  # The web service must be able to create/write this file, and
-  # crunch-job must be able to stat() it.
-  crunch_refresh_trigger: /tmp/crunch_refresh_trigger
-
-  # Path to dns server configuration directory
-  # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
-  # files or touch restart.txt (see below).
-  dns_server_conf_dir: false
-
-  # Template file for the dns server host snippets. See
-  # unbound.template in this directory for an example. If false, do
-  # not write any config files.
-  dns_server_conf_template: false
-
-  # String to write to {dns_server_conf_dir}/restart.txt (with a
-  # trailing newline) after updating local data. If false, do not
-  # open or write the restart.txt file.
-  dns_server_reload_command: false
-
-  # Command to run after each DNS update. Template variables will be
-  # substituted; see the "unbound" example below. If false, do not run
-  # a command.
-  dns_server_update_command: false
-
-  ## Example for unbound:
-  #dns_server_conf_dir: /etc/unbound/conf.d
-  #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
-  ## ...plus one of the following two methods of reloading:
-  #dns_server_reload_command: unbound-control reload
-  #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
-
-  compute_node_domain: false
-  compute_node_nameservers:
-    - 192.168.1.1
-
-  # Hostname to assign to a compute node when it sends a "ping" and the
-  # hostname in its Node record is nil.
-  # During bootstrapping, the "ping" script is expected to notice the
-  # hostname given in the ping response, and update its unix hostname
-  # accordingly.
-  # If false, leave the hostname alone (this is appropriate if your compute
-  # nodes' hostnames are already assigned by some other mechanism).
-  #
-  # One way or another, the hostnames of your node records should agree
-  # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
-  #
-  # Example for compute0000, compute0001, ....:
-  # assign_node_hostname: compute%<slot_number>04d
-  # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
-  assign_node_hostname: compute%<slot_number>d
-
-
-  ###
-  ### Job and container reuse logic.
-  ###
-
-  # Include details about job reuse decisions in the server log. This
-  # causes additional database queries to run, so it should not be
-  # enabled unless you expect to examine the resulting logs for
-  # troubleshooting purposes.
-  log_reuse_decisions: false
-
-  # Control job reuse behavior when two completed jobs match the
-  # search criteria and have different outputs.
-  #
-  # If true, in case of a conflict, reuse the earliest job (this is
-  # similar to container reuse behavior).
-  #
-  # If false, in case of a conflict, do not reuse any completed job,
-  # but do reuse an already-running job if available (this is the
-  # original job reuse behavior, and is still the default).
-  reuse_job_if_outputs_differ: false
-
-  ###
-  ### Federation support.
-  ###
-
-  # You can enable use of this cluster by users who are authenticated
-  # by a remote Arvados site. Control which remote hosts are trusted
-  # to authenticate which user IDs by configuring remote_hosts,
-  # remote_hosts_via_dns, or both. The default configuration disables
-  # remote authentication.
-
-  # Map known prefixes to hosts. For example, if user IDs beginning
-  # with "zzzzz-" should be authenticated by the Arvados server at
-  # "zzzzz.example.com", use:
-  #
-  # remote_hosts:
-  #   zzzzz: zzzzz.example.com
-  remote_hosts: {}
-
-  # Use {prefix}.arvadosapi.com for any prefix not given in
-  # remote_hosts above.
-  remote_hosts_via_dns: false
-
-  # List of cluster prefixes.  These are "trusted" clusters, users
-  # from the clusters listed here will be automatically setup and
-  # activated.  This is separate from the settings
-  # auto_setup_new_users and new_users_are_active.
-  auto_activate_users_from: []
-
-  ###
-  ### Remaining assorted configuration options.
-  ###
-
-  arvados_theme: default
-
-  # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the
-  # Single Sign On (sso) server and remote Arvados sites.  Should only
-  # be enabled during development when the SSO server is using a
-  # self-signed cert.
-  sso_insecure: false
 
   ## Set Time.zone default to the specified zone and make Active
   ## Record auto-convert to this zone.  Run "rake -D time" for a list
@@ -472,17 +28,6 @@ common:
   # Version of your assets, change this if you want to expire all your assets
   assets.version: "1.0"
 
-  # Allow clients to create collections by providing a manifest with
-  # unsigned data blob locators. IMPORTANT: This effectively disables
-  # access controls for data stored in Keep: a client who knows a hash
-  # can write a manifest that references the hash, pass it to
-  # collections.create (which will create a permission link), use
-  # collections.get to obtain a signature for that data locator, and
-  # use that signed locator to retrieve the data from Keep. Therefore,
-  # do not turn this on if your users expect to keep data private from
-  # one another!
-  permit_create_collection_with_unsigned_manifest: false
-
   default_openid_prefix: https://www.google.com/accounts/o8/id
 
   # Override the automatic version string. With the default value of
@@ -496,42 +41,6 @@ common:
   # (included in vendor packages).
   package_version: false
 
-  # Default value for container_count_max for container requests.  This is the
-  # number of times Arvados will create a new container to satisfy a container
-  # request.  If a container is cancelled it will retry a new container if
-  # container_count < container_count_max on any container requests associated
-  # with the cancelled container.
-  container_count_max: 3
-
-  # Default value for keep_cache_ram of a container's runtime_constraints.
-  container_default_keep_cache_ram: 268435456
-
-  # Token to be included in all healthcheck requests. Disabled by default.
-  # Server expects request header of the format "Authorization: Bearer xxx"
-  ManagementToken: false
-
-  # URL of keep-web service.  Provides read/write access to collections via
-  # HTTP and WebDAV protocols.
-  #
-  # Example:
-  # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/
-  keep_web_service_url: false
-
-  # If true, enable collection versioning.
-  # When a collection's preserve_version field is true or the current version
-  # is older than the amount of seconds defined on preserve_version_if_idle,
-  # a snapshot of the collection's previous state is created and linked to
-  # the current collection.
-  collection_versioning: false
-  #   0 = auto-create a new version on every update.
-  #  -1 = never auto-create new versions.
-  # > 0 = auto-create a new version when older than the specified number of seconds.
-  preserve_version_if_idle: -1
-
-  # Number of times a container can be unlocked before being
-  # automatically cancelled.
-  max_container_dispatch_attempts: 5
-
 development:
   force_ssl: false
   cache_classes: false
@@ -560,10 +69,6 @@ production:
 test:
   force_ssl: false
   cache_classes: true
-  public_file_server:
-    enabled: true
-    headers:
-      'Cache-Control': public, max-age=3600
   whiny_nils: true
   consider_all_requests_local: true
   action_controller.perform_caching: false
index d6fcc9ea09be03a321d89318c8b0bf9c442a4abe..9a4270ad9df4384d88c97cf100ae83790a1a35ba 100644 (file)
@@ -45,6 +45,8 @@ module Server
     # The following is to avoid SafeYAML's warning message
     SafeYAML::OPTIONS[:default_mode] = :safe
 
+    require_relative "arvados_config.rb"
+
     # Settings in config/environments/* take precedence over those specified here.
     # Application configuration should go into files in config/initializers
     # -- all .rb files in that directory are automatically loaded.
diff --git a/services/api/config/arvados_config.rb b/services/api/config/arvados_config.rb
new file mode 100644 (file)
index 0000000..669beb1
--- /dev/null
@@ -0,0 +1,275 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+#
+# Load Arvados configuration from /etc/arvados/config.yml, using defaults
+# from config.default.yml
+#
+# Existing application.yml is migrated into the new config structure.
+# Keys in the legacy application.yml take precedence.
+#
+# Use "bundle exec config:dump" to get the complete active configuration
+#
+# Use "bundle exec config:migrate" to migrate application.yml and
+# database.yml to config.yml.  After adding the output of
+# config:migrate to /etc/arvados/config.yml, you will be able to
+# delete application.yml and database.yml.
+
+require 'config_loader'
+
+begin
+  # If secret_token.rb exists here, we need to load it first.
+  require_relative 'secret_token.rb'
+rescue LoadError
+  # Normally secret_token.rb is missing and the secret token is
+  # configured by application.yml (i.e., here!) instead.
+end
+
+if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and
+    not defined? WARNED_OMNIAUTH_CONFIG)
+  Rails.logger.warn <<-EOS
+DEPRECATED CONFIGURATION:
+ Please move your SSO provider config into config/application.yml
+ and delete config/initializers/omniauth.rb.
+EOS
+  # Real values will be copied from globals by omniauth_init.rb. For
+  # now, assign some strings so the generic *.yml config loader
+  # doesn't overwrite them or complain that they're missing.
+  Rails.configuration.Login["ProviderAppID"] = 'xxx'
+  Rails.configuration.Login["ProviderAppSecret"] = 'xxx'
+  Rails.configuration.Services["SSO"]["ExternalURL"] = '//xxx'
+  WARNED_OMNIAUTH_CONFIG = true
+end
+
+# Load the defaults
+$arvados_config_defaults = ConfigLoader.load "#{::Rails.root.to_s}/config/config.default.yml"
+if $arvados_config_defaults.empty?
+  raise "Missing #{::Rails.root.to_s}/config/config.default.yml"
+end
+
+clusterID, clusterConfig = $arvados_config_defaults["Clusters"].first
+$arvados_config_defaults = clusterConfig
+$arvados_config_defaults["ClusterID"] = clusterID
+
+# Initialize the global config with the defaults
+$arvados_config_global = $arvados_config_defaults.deep_dup
+
+# Load the global config file
+confs = ConfigLoader.load "/etc/arvados/config.yml"
+if !confs.empty?
+  clusterID, clusterConfig = confs["Clusters"].first
+  $arvados_config_global["ClusterID"] = clusterID
+
+  # Copy the cluster config over the defaults
+  $arvados_config_global.deep_merge!(clusterConfig)
+end
+
+# Now make a copy
+$arvados_config = $arvados_config_global.deep_dup
+
+# Declare all our configuration items.
+arvcfg = ConfigLoader.new
+arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix
+arvcfg.declare_config "ManagementToken", String, :ManagementToken
+arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir
+arvcfg.declare_config "API.DisabledAPIs", Array, :disable_api_methods
+arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size
+arvcfg.declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
+arvcfg.declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
+arvcfg.declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval
+arvcfg.declare_config "API.RailsSessionSecretToken", NonemptyString, :secret_token
+arvcfg.declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
+arvcfg.declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
+arvcfg.declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
+arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
+arvcfg.declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
+arvcfg.declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
+arvcfg.declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
+arvcfg.declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address
+arvcfg.declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
+arvcfg.declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
+arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
+arvcfg.declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
+arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
+arvcfg.declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
+arvcfg.declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
+arvcfg.declare_config "TLS.Insecure", Boolean, :sso_insecure
+arvcfg.declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url
+arvcfg.declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
+arvcfg.declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
+arvcfg.declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+arvcfg.declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
+arvcfg.declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
+arvcfg.declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
+arvcfg.declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
+arvcfg.declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
+arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
+arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
+arvcfg.declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
+arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Collections.BlobSigning", !v } # legacy flag has the inverted sense: permitting unsigned manifests means signing is disabled
+arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
+arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
+arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
+arvcfg.declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
+arvcfg.declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances
+arvcfg.declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
+arvcfg.declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
+arvcfg.declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
+arvcfg.declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
+arvcfg.declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
+arvcfg.declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
+arvcfg.declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
+arvcfg.declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
+arvcfg.declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
+arvcfg.declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
+arvcfg.declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfDir", Pathname, :dns_server_conf_dir
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname, :dns_server_conf_template
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+arvcfg.declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
+arvcfg.declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
+arvcfg.declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
+arvcfg.declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
+arvcfg.declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
+arvcfg.declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
+arvcfg.declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
+arvcfg.declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
+arvcfg.declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
+arvcfg.declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
+arvcfg.declare_config "Services.Controller.ExternalURL", URI
+arvcfg.declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address
+arvcfg.declare_config "Services.Websocket.ExternalURL", URI, :websocket_address
+arvcfg.declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url
+arvcfg.declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base
+arvcfg.declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Services.GitSSH.ExternalURL", "ssh://#{v}" }
+arvcfg.declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
+  h = if cfg["RemoteClusters"] then
+        cfg["RemoteClusters"].deep_dup
+      else
+        {}
+      end
+  v.each do |clusterid, host|
+    if h[clusterid].nil?
+      h[clusterid] = {
+        "Host" => host,
+        "Proxy" => true,
+        "Scheme" => "https",
+        "Insecure" => false,
+        "ActivateUsers" => false
+      }
+    end
+  end
+  ConfigLoader.set_cfg cfg, "RemoteClusters", h
+}
+arvcfg.declare_config "RemoteClusters.*.Proxy", Boolean, :remote_hosts_via_dns
+
+dbcfg = ConfigLoader.new
+
+dbcfg.declare_config "PostgreSQL.ConnectionPool", Integer, :pool
+dbcfg.declare_config "PostgreSQL.Connection.Host", String, :host
+dbcfg.declare_config "PostgreSQL.Connection.Port", Integer, :port
+dbcfg.declare_config "PostgreSQL.Connection.User", String, :username
+dbcfg.declare_config "PostgreSQL.Connection.Password", String, :password
+dbcfg.declare_config "PostgreSQL.Connection.DBName", String, :database
+dbcfg.declare_config "PostgreSQL.Connection.Template", String, :template
+dbcfg.declare_config "PostgreSQL.Connection.Encoding", String, :encoding
+
+application_config = {}
+%w(application.default application).each do |cfgfile|
+  path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
+  confs = ConfigLoader.load(path, erb: true)
+  # Ignore empty YAML file:
+  next if confs == false
+  application_config.deep_merge!(confs['common'] || {})
+  application_config.deep_merge!(confs[::Rails.env.to_s] || {})
+end
+
+db_config = {}
+path = "#{::Rails.root.to_s}/config/database.yml"
+if File.exist? path
+  db_config = ConfigLoader.load(path, erb: true)
+end
+
+$remaining_config = arvcfg.migrate_config(application_config, $arvados_config)
+dbcfg.migrate_config(db_config[::Rails.env.to_s] || {}, $arvados_config)
+
+if application_config[:auto_activate_users_from]
+  application_config[:auto_activate_users_from].each do |cluster|
+    if $arvados_config.RemoteClusters[cluster]
+      $arvados_config.RemoteClusters[cluster]["ActivateUsers"] = true
+    end
+  end
+end
+
+if application_config[:host] || application_config[:port] || application_config[:scheme]
+  if !application_config[:host] || application_config[:host].empty?
+    raise "Must set 'host' when setting 'port' or 'scheme'"
+  end
+  $arvados_config.Services["Controller"]["ExternalURL"] = URI((application_config[:scheme] || "https")+"://"+application_config[:host]+
+                                                              (if application_config[:port] then ":#{application_config[:port]}" else "" end))
+end
+
+# Checks for wrongly typed configuration items, coerces properties
+# into correct types (such as Duration), and optionally raise error
+# for essential configuration that can't be empty.
+arvcfg.coercion_and_check $arvados_config_defaults, check_nonempty: false
+arvcfg.coercion_and_check $arvados_config_global, check_nonempty: false
+arvcfg.coercion_and_check $arvados_config, check_nonempty: true
+dbcfg.coercion_and_check $arvados_config, check_nonempty: true
+
+# * $arvados_config_defaults is the defaults
+# * $arvados_config_global is $arvados_config_defaults merged with the contents of /etc/arvados/config.yml
+# These are used by the rake config: tasks
+#
+# * $arvados_config is $arvados_config_global merged with the migrated contents of application.yml
+# This is what actually gets copied into the Rails configuration object.
+
+if $arvados_config["Collections"]["DefaultTrashLifetime"] < 86400.seconds then
+  # Read from $arvados_config: Rails.configuration is not populated until the
+  # Server::Application.configure block below, so it cannot be used here.
+  raise "default_trash_lifetime is %d, must be at least 86400" % $arvados_config["Collections"]["DefaultTrashLifetime"]
+end
+
+#
+# Special case for test database where there's no database.yml,
+# because the Arvados config.yml doesn't have a concept of multiple
+# rails environments.
+#
+if ::Rails.env.to_s == "test" && db_config["test"].nil?
+  $arvados_config["PostgreSQL"]["Connection"]["DBName"] = "arvados_test"
+end
+
+if $arvados_config["PostgreSQL"]["Connection"]["Password"].empty?
+  raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}"
+end
+
+dbhost = $arvados_config["PostgreSQL"]["Connection"]["Host"]
+if $arvados_config["PostgreSQL"]["Connection"]["Port"] != 0 # append port only when explicitly configured (declared key is "Port", see dbcfg above)
+  dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["Port"]}"
+end
+
+#
+# If DATABASE_URL is set, then ActiveRecord won't error out if database.yml doesn't exist.
+#
+# For config migration, we've previously populated the PostgreSQL
+# section of the config from database.yml
+#
+ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["User"]}:"+
+                      "#{$arvados_config["PostgreSQL"]["Connection"]["Password"]}@"+
+                      "#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["DBName"]}?"+
+                      "template=#{$arvados_config["PostgreSQL"]["Connection"]["Template"]}&"+
+                      "encoding=#{$arvados_config["PostgreSQL"]["Connection"]["Encoding"]}&"+
+                      "pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"
+
+Server::Application.configure do
+  # Copy into the Rails config object.  This also turns Hash into
+  # OrderedOptions so that application code can use
+  # Rails.configuration.API.Blah instead of
+  # Rails.configuration.API["Blah"]
+  ConfigLoader.copy_into_config $arvados_config, config
+  ConfigLoader.copy_into_config $remaining_config, config
+  secrets.secret_key_base = $arvados_config["API"]["RailsSessionSecretToken"]
+end
diff --git a/services/api/config/config.default.yml b/services/api/config/config.default.yml
new file mode 120000 (symlink)
index 0000000..f039aa0
--- /dev/null
@@ -0,0 +1 @@
+../../../lib/config/config.default.yml
\ No newline at end of file
index 9ea6b2884374317e65c1cbc1027a1f0581b3a103..8f3b3cb5f8e951df55979a1f74adce8b847de652 100644 (file)
@@ -5,7 +5,6 @@
 # Config must be done before we load model class files; otherwise they
 # won't be able to use Rails.configuration.* to initialize their
 # classes.
-require_relative 'load_config.rb'
 
 require 'enable_jobs_api'
 
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
deleted file mode 100644 (file)
index 16059ca..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-begin
-  # If secret_token.rb exists here, we need to load it first.
-  require_relative 'secret_token.rb'
-rescue LoadError
-  # Normally secret_token.rb is missing and the secret token is
-  # configured by application.yml (i.e., here!) instead.
-end
-
-if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and
-    not defined? WARNED_OMNIAUTH_CONFIG)
-  Rails.logger.warn <<-EOS
-DEPRECATED CONFIGURATION:
- Please move your SSO provider config into config/application.yml
- and delete config/initializers/omniauth.rb.
-EOS
-  # Real values will be copied from globals by omniauth_init.rb. For
-  # now, assign some strings so the generic *.yml config loader
-  # doesn't overwrite them or complain that they're missing.
-  Rails.configuration.sso_app_id = 'xxx'
-  Rails.configuration.sso_app_secret = 'xxx'
-  Rails.configuration.sso_provider_url = '//xxx'
-  WARNED_OMNIAUTH_CONFIG = true
-end
-
-$application_config = {}
-
-%w(application.default application).each do |cfgfile|
-  path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
-  if File.exist? path
-    yaml = ERB.new(IO.read path).result(binding)
-    confs = YAML.load(yaml, deserialize_symbols: true)
-    # Ignore empty YAML file:
-    next if confs == false
-    $application_config.merge!(confs['common'] || {})
-    $application_config.merge!(confs[::Rails.env.to_s] || {})
-  end
-end
-
-Server::Application.configure do
-  nils = []
-  $application_config.each do |k, v|
-    # "foo.bar: baz" --> { config.foo.bar = baz }
-    cfg = config
-    ks = k.split '.'
-    k = ks.pop
-    ks.each do |kk|
-      cfg = cfg.send(kk)
-    end
-    if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
-      # Config must have been set already in environments/*.rb.
-      #
-      # After config files have been migrated, this mechanism should
-      # be deprecated, then removed.
-    elsif v.nil?
-      # Config variables are not allowed to be nil. Make a "naughty"
-      # list, and present it below.
-      nils << k
-    else
-      cfg.send "#{k}=", v
-    end
-  end
-  if !nils.empty?
-    raise <<EOS
-Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
-
-The following configuration settings must be specified in
-config/application.yml:
-* #{nils.join "\n* "}
-
-EOS
-  end
-  config.secret_key_base = config.secret_token
-end
index ef4e428bff0f97fafa0f0831beb98de52a2a164d..07dba3aef4ff9bb64db8c1236a73c0c97c15a794 100644 (file)
@@ -38,8 +38,8 @@ Server::Application.configure do
     end
 
     params_s = SafeJSON.dump(params)
-    if params_s.length > Rails.configuration.max_request_log_params_size
-      payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
+    if params_s.length > Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]
+      payload[:params_truncated] = params_s[0..Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]] + "[...]"
     else
       payload[:params] = params
     end
index b5e98943dfd9178dd44c61568d34563492222441..5610999a9405c05464279a8031ec2bc13ae55bf1 100644 (file)
@@ -9,15 +9,15 @@
 
 if defined? CUSTOM_PROVIDER_URL
   Rails.logger.warn "Copying omniauth from globals in legacy config file."
-  Rails.configuration.sso_app_id = APP_ID
-  Rails.configuration.sso_app_secret = APP_SECRET
-  Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL
+  Rails.configuration.Login["ProviderAppID"] = APP_ID
+  Rails.configuration.Login["ProviderAppSecret"] = APP_SECRET
+  Rails.configuration.Services["SSO"]["ExternalURL"] = CUSTOM_PROVIDER_URL
 else
   Rails.application.config.middleware.use OmniAuth::Builder do
     provider(:josh_id,
-             Rails.configuration.sso_app_id,
-             Rails.configuration.sso_app_secret,
-             Rails.configuration.sso_provider_url)
+             Rails.configuration.Login["ProviderAppID"],
+             Rails.configuration.Login["ProviderAppSecret"],
+             Rails.configuration.Services["SSO"]["ExternalURL"])
   end
   OmniAuth.config.on_failure = StaticController.action(:login_failure)
 end
index 0ab2b032a4c0e4e7b55da23512ee62fd03c4ac88..713c61fd75a6e75794b31afa7a1d6ca3f6005a43 100644 (file)
@@ -7,7 +7,6 @@
 # Config must be done before we load model class files; otherwise they
 # won't be able to use Rails.configuration.* to initialize their
 # classes.
-require_relative 'load_config.rb'
 
 if Rails.env == 'development'
   Dir.foreach("#{Rails.root}/app/models") do |model_file|
diff --git a/services/api/config/secrets.yml b/services/api/config/secrets.yml
deleted file mode 100644 (file)
index f21de2d..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-# Be sure to restart your server when you modify this file.
-
-# Your secret key is used for verifying the integrity of signed cookies.
-# If you change this key, all old signed cookies will become invalid!
-
-# Make sure the secret is at least 30 characters and all random,
-# no regular words or you'll be exposed to dictionary attacks.
-# You can use `rails secret` to generate a secure secret key.
-
-# Make sure the secrets in this file are kept private
-# if you're sharing your code publicly.
-
-development:
-  secret_key_base: ef8dfe92893202f906d198094f428aaefa75749338e306ed2874938598cad7153ef0dd3cb8036c618cc7c27bb0c6c559728e8cc224da7cdfa2ad1d02874643b0
-
-test:
-  secret_key_base: 0b5454fe8163063950a7124348e2bc780fabbb022fa15f8a074c2fbcfce8eca480ed46b549b87738904f2bae6617ad949c3c3579e272d486c25aaa0ead563355
-
-# Do not keep production secrets in the repository,
-# instead read values from the environment.
-production:
-  secret_key_base: <%= ENV["SECRET_KEY_BASE"] %>
diff --git a/services/api/db/migrate/20190422144631_fill_missing_modified_at.rb b/services/api/db/migrate/20190422144631_fill_missing_modified_at.rb
new file mode 100644 (file)
index 0000000..5075e48
--- /dev/null
@@ -0,0 +1,11 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class FillMissingModifiedAt < ActiveRecord::Migration[5.0]
+  def up
+    Collection.where('modified_at is null').update_all('modified_at = created_at')
+  end
+  def down
+  end
+end
index ebe06b34843aec8be5427e624e21db2653aef337..0408b5265b241b9f371b2da07699e75dac9710d3 100644 (file)
@@ -3059,5 +3059,7 @@ INSERT INTO "schema_migrations" (version) VALUES
 ('20181011184200'),
 ('20181213183234'),
 ('20190214214814'),
-('20190322174136');
+('20190322174136'),
+('20190422144631');
+
 
index 56fd935f3fdc9f0fe9055067edf45fe44df59003..e97f65a97397c86474fa69d190e92cd1fabfe8ce 100644 (file)
@@ -44,8 +44,8 @@ module AuditLogs
   end
 
   def self.tidy_in_background
-    max_age = Rails.configuration.max_audit_log_age
-    max_batch = Rails.configuration.max_audit_log_delete_batch
+    max_age = Rails.configuration.AuditLogs.MaxAge
+    max_batch = Rails.configuration.AuditLogs.MaxDeleteBatch
     return if max_age <= 0 || max_batch <= 0
 
     exp = (max_age/14).seconds
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
new file mode 100644 (file)
index 0000000..90b6d9d
--- /dev/null
@@ -0,0 +1,205 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Extend Psych's YAML serializer with scalar representations for the
+# value types produced by config coercion (durations, URIs, pathnames),
+# so the rake config tasks can dump the config tree as readable YAML.
+module Psych
+  module Visitors
+    class YAMLTree < Psych::Visitors::Visitor
+      # Emit an ActiveSupport::Duration as e.g. "1h30m15s" — largest
+      # units first, zero components omitted, "0s" for zero.
+      # NOTE(review): o.to_i truncates sub-second precision — confirm
+      # fractional durations are not expected here.
+      def visit_ActiveSupport_Duration o
+        seconds = o.to_i
+        outstr = ""
+        if seconds / 3600 > 0
+          outstr += "#{seconds / 3600}h"
+          seconds = seconds % 3600
+        end
+        if seconds / 60 > 0
+          outstr += "#{seconds / 60}m"
+          seconds = seconds % 60
+        end
+        if seconds > 0
+          outstr += "#{seconds}s"
+        end
+        if outstr == ""
+          outstr = "0s"
+        end
+        @emitter.scalar outstr, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+
+      # URIs and Pathnames serialize as their plain string form.
+      def visit_URI_Generic o
+        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+
+      def visit_URI_HTTP o
+        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+
+      def visit_Pathname o
+        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+      end
+    end
+  end
+end
+
+
+# Marker module so config type declarations can say "Boolean" and have
+# is_a? accept both true and false (Ruby's TrueClass/FalseClass share
+# no built-in boolean ancestor).
+module Boolean; end
+class TrueClass; include Boolean; end
+class FalseClass; include Boolean; end
+
+# String subtype used by declare_config to require a non-empty value
+# (enforced in ConfigLoader#coercion_and_check).
+class NonemptyString < String
+end
+
+# Loads, migrates, type-checks and coerces the unified cluster
+# configuration.  Keys are declared with declare_config (dotted path
+# plus expected type, optionally a legacy key to migrate from); a
+# loaded config tree is then normalized by coercion_and_check.
+class ConfigLoader
+  def initialize
+    @config_migrate_map = {}
+    @config_types = {}
+  end
+
+  # Declare a config key.  assign_to is the dotted path ("Group.Key"),
+  # configtype the expected Ruby type.  When migrate_from is given,
+  # values found under that legacy key are copied to assign_to, via
+  # migrate_fn when supplied, else by straight assignment.
+  def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
+    if migrate_from
+      @config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
+        ConfigLoader.set_cfg cfg, assign_to, v
+      }
+    end
+    @config_types[assign_to] = configtype
+  end
+
+  # Apply declared migrations: copy recognized legacy keys from
+  # from_config into to_config; return the hash of unrecognized
+  # leftovers so the caller can report/keep them.
+  def migrate_config from_config, to_config
+    remainders = {}
+    from_config.each do |k, v|
+      if @config_migrate_map[k.to_sym]
+        @config_migrate_map[k.to_sym].call to_config, k, v
+      else
+        remainders[k] = v
+      end
+    end
+    remainders
+  end
+
+  # Walk every declared key in check_cfg, coercing values to the
+  # declared type (String, Pathname, Duration, URI, ...).  Raises on
+  # missing keys, empty NonemptyStrings (unless check_nonempty is
+  # false), nonexistent Pathnames, and type mismatches.
+  def coercion_and_check check_cfg, check_nonempty: true
+    @config_types.each do |cfgkey, cfgtype|
+      cfg = check_cfg
+      k = cfgkey
+      ks = k.split '.'
+      k = ks.pop
+      ks.each do |kk|
+        cfg = cfg[kk]
+        if cfg.nil?
+          break
+        end
+      end
+
+      if cfg.nil?
+        raise "missing #{cfgkey}"
+      end
+
+      if cfgtype == String and !cfg[k]
+        cfg[k] = ""
+      end
+
+      if cfgtype == String and cfg[k].is_a? Symbol
+        cfg[k] = cfg[k].to_s
+      end
+
+      if cfgtype == Pathname and cfg[k].is_a? String
+        # Empty string means "unset"; any other path must exist.
+        if cfg[k] == ""
+          cfg[k] = Pathname.new("")
+        else
+          cfg[k] = Pathname.new(cfg[k])
+          if !cfg[k].exist?
+            raise "#{cfgkey} path #{cfg[k]} does not exist"
+          end
+        end
+      end
+
+      if cfgtype == NonemptyString
+        if (!cfg[k] || cfg[k] == "") && check_nonempty
+          raise "#{cfgkey} cannot be empty"
+        end
+        # A plain String satisfies NonemptyString; skip the final
+        # is_a? check below, which would reject it.
+        if cfg[k].is_a? String
+          next
+        end
+      end
+
+      if cfgtype == ActiveSupport::Duration
+        if cfg[k].is_a? Integer
+          cfg[k] = cfg[k].seconds
+        elsif cfg[k].is_a? String
+          cfg[k] = ConfigLoader.parse_duration cfg[k]
+        end
+      end
+
+      if cfgtype == URI
+        cfg[k] = URI(cfg[k])
+      end
+
+      if !cfg[k].is_a? cfgtype
+        raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
+      end
+    end
+  end
+
+  # "foo.bar = baz" --> { cfg["foo"]["bar"] = baz }
+  # Silently does nothing when an intermediate hash is missing.
+  def self.set_cfg cfg, k, v
+    ks = k.split '.'
+    k = ks.pop
+    ks.each do |kk|
+      cfg = cfg[kk]
+      if cfg.nil?
+        break
+      end
+    end
+    if !cfg.nil?
+      cfg[k] = v
+    end
+  end
+
+  # Parse a duration string such as "1h30m15s" into an
+  # ActiveSupport::Duration.  The pattern is anchored (\A) so an
+  # unrecognized prefix raises instead of matching later in the string
+  # and mis-slicing the input.
+  def self.parse_duration durstr
+    duration_re = /\A(\d+(\.\d+)?)(s|m|h)/
+    multiplier = {s: 1, m: 60, h: 3600}
+    dursec = 0
+    while durstr != ""
+      mt = duration_re.match durstr
+      if !mt
+        # Previous message interpolated undefined cfgkey/cfg[k] here,
+        # which raised NameError instead of the intended error.
+        raise "'#{durstr}' is not a valid duration: accepted suffixes are s, m, h"
+      end
+      dursec += (Float(mt[1]) * multiplier[mt[3].to_sym])
+      durstr = durstr[mt[0].length..-1]
+    end
+    return dursec.seconds
+  end
+
+  # Copy each top-level key of src onto the Rails config object dst,
+  # converting nested hashes to OrderedOptions for dotted access.
+  def self.copy_into_config src, dst
+    src.each do |k, v|
+      dst.send "#{k}=", self.to_OrderedOptions(v)
+    end
+  end
+
+  # Recursively convert hashes (including hashes inside arrays) into
+  # ActiveSupport::OrderedOptions so config supports method-style
+  # access (cfg.Foo.Bar).  Scalars pass through unchanged.
+  def self.to_OrderedOptions confs
+    if confs.is_a? Hash
+      opts = ActiveSupport::OrderedOptions.new
+      confs.each do |k,v|
+        opts[k] = self.to_OrderedOptions(v)
+      end
+      opts
+    elsif confs.is_a? Array
+      confs.map { |v| self.to_OrderedOptions v }
+    else
+      confs
+    end
+  end
+
+  # Read a YAML config file, optionally ERB-expanded first; returns {}
+  # when the file does not exist.  Symbols are not deserialized.
+  def self.load path, erb: false
+    if File.exist? path
+      yaml = IO.read path
+      if erb
+        yaml = ERB.new(yaml).result(binding)
+      end
+      YAML.load(yaml, deserialize_symbols: false)
+    else
+      {}
+    end
+  end
+
+end
index 449d7d51626a1963ab39e83e3e95998f50d21b1e..4e640186d1994a2f8d404d963233ddac1db3a356 100644 (file)
@@ -31,13 +31,13 @@ class CrunchDispatch
     @cgroup_root = ENV['CRUNCH_CGROUP_ROOT']
     @srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT']
 
-    @arvados_internal = Rails.configuration.git_internal_dir
+    @arvados_internal = Rails.configuration.Containers.JobsAPI.GitInternalDir
     if not File.exist? @arvados_internal
       $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
       raise "No internal git repository available" unless ($? == 0)
     end
 
-    @repo_root = Rails.configuration.git_repositories_dir
+    @repo_root = Rails.configuration.Git.Repositories
     @arvados_repo_path = Repository.where(name: "arvados").first.server_path
     @authorizations = {}
     @did_recently = {}
@@ -110,7 +110,7 @@ class CrunchDispatch
   end
 
   def update_node_status
-    return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
+    return unless Rails.configuration.Containers.JobsAPI.CrunchJobWrapper.to_s.match(/^slurm/)
     slurm_status.each_pair do |hostname, slurmdata|
       next if @node_state[hostname] == slurmdata
       begin
@@ -337,14 +337,14 @@ class CrunchDispatch
       next if @running[job.uuid]
 
       cmd_args = nil
-      case Server::Application.config.crunch_job_wrapper
-      when :none
+      case Rails.configuration.Containers.JobsAPI.CrunchJobWrapper
+      when "none"
         if @running.size > 0
             # Don't run more than one at a time.
             return
         end
         cmd_args = []
-      when :slurm_immediate
+      when "slurm_immediate"
         nodelist = nodes_available_for_job(job)
         if nodelist.nil?
           if Time.now < @node_wait_deadline
@@ -361,7 +361,7 @@ class CrunchDispatch
                     "--job-name=#{job.uuid}",
                     "--nodelist=#{nodelist.join(',')}"]
       else
-        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
+        raise "Unknown crunch_job_wrapper: #{Rails.configuration.Containers.JobsAPI.CrunchJobWrapper}"
       end
 
       cmd_args = sudo_preface + cmd_args
@@ -460,7 +460,7 @@ class CrunchDispatch
         bytes_logged: 0,
         events_logged: 0,
         log_throttle_is_open: true,
-        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
+        log_throttle_reset_time: Time.now + Rails.configuration.Containers.Logging.LogThrottlePeriod,
         log_throttle_bytes_so_far: 0,
         log_throttle_lines_so_far: 0,
         log_throttle_bytes_skipped: 0,
@@ -485,7 +485,7 @@ class CrunchDispatch
       matches = line.match(/^\S+ \S+ \d+ \d+ stderr (.*)/)
       if matches and matches[1] and matches[1].start_with?('[...]') and matches[1].end_with?('[...]')
         partial_line = true
-        if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.crunch_log_partial_line_throttle_period
+        if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod
           running_job[:log_throttle_partial_line_last_at] = Time.now
         else
           skip_counts = true
@@ -499,26 +499,26 @@ class CrunchDispatch
       end
 
       if (running_job[:bytes_logged] >
-          Rails.configuration.crunch_limit_log_bytes_per_job)
-        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
+          Rails.configuration.Containers.Logging.LimitLogBytesPerJob)
+        message = "Exceeded log limit #{Rails.configuration.Containers.Logging.LimitLogBytesPerJob} bytes (LimitLogBytesPerJob). Log will be truncated."
         running_job[:log_throttle_reset_time] = Time.now + 100.years
         running_job[:log_throttle_is_open] = false
 
       elsif (running_job[:log_throttle_bytes_so_far] >
-             Rails.configuration.crunch_log_throttle_bytes)
+             Rails.configuration.Containers.Logging.LogThrottleBytes)
         remaining_time = running_job[:log_throttle_reset_time] - Time.now
-        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds."
+        message = "Exceeded rate #{Rails.configuration.Containers.Logging.LogThrottleBytes} bytes per #{Rails.configuration.Containers.Logging.LogThrottlePeriod} seconds (LogThrottleBytes). Logging will be silenced for the next #{remaining_time.round} seconds."
         running_job[:log_throttle_is_open] = false
 
       elsif (running_job[:log_throttle_lines_so_far] >
-             Rails.configuration.crunch_log_throttle_lines)
+             Rails.configuration.Containers.Logging.LogThrottleLines)
         remaining_time = running_job[:log_throttle_reset_time] - Time.now
-        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds."
+        message = "Exceeded rate #{Rails.configuration.Containers.Logging.LogThrottleLines} lines per #{Rails.configuration.Containers.Logging.LogThrottlePeriod} seconds (LogThrottleLines), logging will be silenced for the next #{remaining_time.round} seconds."
         running_job[:log_throttle_is_open] = false
 
       elsif partial_line and running_job[:log_throttle_first_partial_line]
         running_job[:log_throttle_first_partial_line] = false
-        message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.crunch_log_partial_line_throttle_period} seconds."
+        message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod} seconds."
       end
     end
 
@@ -552,7 +552,7 @@ class CrunchDispatch
           j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
         end
 
-        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
+        j[:log_throttle_reset_time] = now + Rails.configuration.Containers.Logging.LogThrottlePeriod
         j[:log_throttle_bytes_so_far] = 0
         j[:log_throttle_lines_so_far] = 0
         j[:log_throttle_bytes_skipped] = 0
@@ -592,7 +592,7 @@ class CrunchDispatch
         bufend = ''
         streambuf.each_line do |line|
           if not line.end_with? $/
-            if line.size > Rails.configuration.crunch_log_throttle_bytes
+            if line.size > Rails.configuration.Containers.Logging.LogThrottleBytes
               # Without a limit here, we'll use 2x an arbitrary amount
               # of memory, and waste a lot of time copying strings
               # around, all without providing any feedback to anyone
@@ -775,7 +775,7 @@ class CrunchDispatch
 
     # This is how crunch-job child procs know where the "refresh"
     # trigger file is
-    ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
+    ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger
 
     # If salloc can't allocate resources immediately, make it use our
     # temporary failure exit code.  This ensures crunch-dispatch won't
@@ -902,9 +902,9 @@ class CrunchDispatch
   end
 
   def sudo_preface
-    return [] if not Server::Application.config.crunch_job_user
+    return [] if not Rails.configuration.Containers.JobsAPI.CrunchJobUser
     ["sudo", "-E", "-u",
-     Server::Application.config.crunch_job_user,
+     Rails.configuration.Containers.JobsAPI.CrunchJobUser,
      "LD_LIBRARY_PATH=#{ENV['LD_LIBRARY_PATH']}",
      "PATH=#{ENV['PATH']}",
      "PERLLIB=#{ENV['PERLLIB']}",
@@ -937,8 +937,8 @@ class CrunchDispatch
     # Send out to log event if buffer size exceeds the bytes per event or if
     # it has been at least crunch_log_seconds_between_events seconds since
     # the last flush.
-    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
-        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
+    if running_job[:stderr_buf_to_flush].size > Rails.configuration.Containers.Logging.LogBytesPerEvent or
+        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.Containers.Logging.LogSecondsBetweenEvents
       begin
         log = Log.new(object_uuid: running_job[:job].uuid,
                       event_type: 'stderr',
@@ -957,7 +957,7 @@ class CrunchDispatch
 
   # An array of job_uuids in squeue
   def squeue_jobs
-    if Rails.configuration.crunch_job_wrapper == :slurm_immediate
+    if Rails.configuration.Containers.JobsAPI.CrunchJobWrapper == "slurm_immediate"
       p = IO.popen(['squeue', '-a', '-h', '-o', '%j'])
       begin
         p.readlines.map {|line| line.strip}
index 49638677b18cddaeb9f27501c9d02f58d34a93f2..c7b48c0cdd6ff1ed0056a32e49f65c106afc100c 100644 (file)
@@ -42,25 +42,25 @@ module CurrentApiClient
   end
 
   def system_user_uuid
-    [Server::Application.config.uuid_prefix,
+    [Rails.configuration.ClusterID,
      User.uuid_prefix,
      '000000000000000'].join('-')
   end
 
   def system_group_uuid
-    [Server::Application.config.uuid_prefix,
+    [Rails.configuration.ClusterID,
      Group.uuid_prefix,
      '000000000000000'].join('-')
   end
 
   def anonymous_group_uuid
-    [Server::Application.config.uuid_prefix,
+    [Rails.configuration.ClusterID,
      Group.uuid_prefix,
      'anonymouspublic'].join('-')
   end
 
   def anonymous_user_uuid
-    [Server::Application.config.uuid_prefix,
+    [Rails.configuration.ClusterID,
      User.uuid_prefix,
      'anonymouspublic'].join('-')
   end
@@ -105,7 +105,7 @@ module CurrentApiClient
   end
 
   def all_users_group_uuid
-    [Server::Application.config.uuid_prefix,
+    [Rails.configuration.ClusterID,
      Group.uuid_prefix,
      'fffffffffffffff'].join('-')
   end
index 63543ab3ad52cef568a464d9c68fba9ee4e177a5..c909ae92276480d38f6a0b5ada592c4efb2ea5a7 100644 (file)
@@ -31,9 +31,9 @@ Disable_jobs_api_method_list = ["jobs.create",
                                                "job_tasks.show"]
 
 def check_enable_legacy_jobs_api
-  if Rails.configuration.enable_legacy_jobs_api == false ||
-     (Rails.configuration.enable_legacy_jobs_api == "auto" &&
+  if Rails.configuration.Containers.JobsAPI.Enable == "false" ||
+     (Rails.configuration.Containers.JobsAPI.Enable == "auto" &&
       Job.count == 0)
-    Rails.configuration.disable_api_methods += Disable_jobs_api_method_list
+    Rails.configuration.API.DisabledAPIs += Disable_jobs_api_method_list
   end
 end
index 60db53d5e687440f4297b0ea29fc5893e52cf68c..2074566941fec7c6a79903df712b82541e9a5853 100644 (file)
@@ -30,7 +30,7 @@ module HasUuid
       Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
     end
     def generate_uuid
-      [Server::Application.config.uuid_prefix,
+      [Rails.configuration.ClusterID,
        self.uuid_prefix,
        rand(2**256).to_s(36)[-15..-1]].
         join '-'
index bb6c1f48a80b121a2a7fee7e88a619400c93c07a..f18c0edda00c5101557f0a275b6f2c99648b2b2a 100644 (file)
@@ -40,7 +40,7 @@ module OmniAuth
         options.client_options[:site] = options[:custom_provider_url]
         options.client_options[:authorize_url] = "#{options[:custom_provider_url]}/auth/josh_id/authorize"
         options.client_options[:access_token_url] = "#{options[:custom_provider_url]}/auth/josh_id/access_token"
-        if Rails.configuration.sso_insecure
+        if Rails.configuration.TLS.Insecure
           options.client_options[:ssl] = {verify_mode: OpenSSL::SSL::VERIFY_NONE}
         end
         ::OAuth2::Client.new(options.client_id, options.client_secret, deep_symbolize(options.client_options))
index 736f270e962700c79c573fed841f5c0bde98b8ff..7119eb234800bf12e1460983ed7950ff9e0e4819 100644 (file)
@@ -56,7 +56,7 @@ module LoadParam
         raise ArgumentError.new("Invalid value for limit parameter")
       end
       @limit = [params[:limit].to_i,
-                Rails.configuration.max_items_per_response].min
+                Rails.configuration.API.MaxItemsPerResponse].min
     else
       @limit = DEFAULT_LIMIT
     end
index ed5cc82bfd7a5027fc53be4a07c7e3559f15d283..5c7efd7ded2e0e28ae0167aa357b0e37df810d5a 100644 (file)
@@ -9,7 +9,7 @@ module LogReuseInfo
   # doing expensive things like database queries, and we want to skip
   # those when logging is disabled.
   def log_reuse_info(candidates=nil)
-    if Rails.configuration.log_reuse_decisions
+    if Rails.configuration.Containers.LogReuseDecisions
       msg = yield
       if !candidates.nil?
         msg = "have #{candidates.count} candidates " + msg
index 25be3c08d4d40d8f7dbc7307e76bc7d2423997a6..5d6081f262b25756dd7e3fcdeec42ae762c4687f 100644 (file)
@@ -12,8 +12,8 @@ def do_refresh_permission_view
 end
 
 def refresh_permission_view(async=false)
-  if async and Rails.configuration.async_permissions_update_interval > 0
-    exp = Rails.configuration.async_permissions_update_interval.seconds
+  if async and Rails.configuration.API.AsyncPermissionsUpdateInterval > 0
+    exp = Rails.configuration.API.AsyncPermissionsUpdateInterval.seconds
     need = false
     Rails.cache.fetch('AsyncRefreshPermissionView', expires_in: exp) do
       need = true
index bedbd68a44c8a9e988c202a21457281f680e840a..8613c749cf247c6c11f309c4d43cddc544e99b4f 100644 (file)
@@ -55,8 +55,8 @@ module SweepTrashedObjects
   end
 
   def self.sweep_if_stale
-    return if Rails.configuration.trash_sweep_interval <= 0
-    exp = Rails.configuration.trash_sweep_interval.seconds
+    return if Rails.configuration.Collections.TrashSweepInterval <= 0
+    exp = Rails.configuration.Collections.TrashSweepInterval.seconds
     need = false
     Rails.cache.fetch('SweepTrashedObjects', expires_in: exp) do
       need = true
diff --git a/services/api/lib/tasks/config.rake b/services/api/lib/tasks/config.rake
new file mode 100644 (file)
index 0000000..6067208
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Return the subset of +final+ whose values differ from +base+,
+# recursing into nested hashes; equality is compared via to_yaml so
+# type coercions (e.g. Duration vs Integer) register as differences.
+# Keys present only in +final+ are ignored.
+def diff_hash base, final
+  diffed = {}
+  base.each do |k,v|
+    bk = base[k]
+    # Guard against +final+ missing this subtree entirely: the
+    # original passed nil into the recursion and crashed on nil[k].
+    fk = final.nil? ? nil : final[k]
+    if bk.is_a? Hash
+      d = diff_hash bk, (fk || {})
+      if d.length > 0
+        diffed[k] = d
+      end
+    else
+      if bk.to_yaml != fk.to_yaml
+        diffed[k] = fk
+      end
+    end
+  end
+  diffed
+end
+
+namespace :config do
+  desc 'Print items that differ between legacy application.yml and system config.yml'
+  task diff: :environment do
+    # NOTE(review): $arvados_config_global / $arvados_config appear to
+    # be populated at app boot (config.yml only vs. with legacy
+    # application.yml merged in) — confirm against the initializer.
+    diffed = diff_hash $arvados_config_global, $arvados_config
+    cfg = { "Clusters" => {}}
+    # ClusterID names the cluster section itself, so it is excluded
+    # from the per-cluster subtree being emitted.
+    cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
+    if cfg["Clusters"][$arvados_config["ClusterID"]].empty?
+      puts "No migrations required for /etc/arvados/config.yml"
+    else
+      puts cfg.to_yaml
+    end
+  end
+
+  desc 'Print config.yml after merging with legacy application.yml'
+  task migrate: :environment do
+    # Diff against built-in defaults so only explicit settings appear.
+    diffed = diff_hash $arvados_config_defaults, $arvados_config
+    cfg = { "Clusters" => {}}
+    cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
+    puts cfg.to_yaml
+  end
+
+  desc 'Print configuration as accessed through Rails.configuration'
+  task dump: :environment do
+    # Include legacy keys that were not migrated to the new tree.
+    combined = $arvados_config.deep_dup
+    combined.update $remaining_config
+    puts combined.to_yaml
+  end
+
+  desc 'Legacy config check task -- it is a noop now'
+  task check: :environment do
+    # This exists so that build/rails-package-scripts/postinst.sh doesn't fail.
+  end
+end
diff --git a/services/api/lib/tasks/config_check.rake b/services/api/lib/tasks/config_check.rake
deleted file mode 100644 (file)
index 4f071f1..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-namespace :config do
-  desc 'Ensure site configuration has all required settings'
-  task check: :environment do
-    $stderr.puts "%-32s %s" % ["AppVersion (discovered)", AppVersion.hash]
-    $application_config.sort.each do |k, v|
-      if ENV.has_key?('QUIET') then
-        # Make sure we still check for the variable to exist
-        eval("Rails.configuration.#{k}")
-      else
-        if /(password|secret|signing_key)/.match(k) then
-          # Make sure we still check for the variable to exist, but don't print the value
-          eval("Rails.configuration.#{k}")
-          $stderr.puts "%-32s %s" % [k, '*********']
-        else
-          $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")]
-        end
-      end
-    end
-    # default_trash_lifetime cannot be less than 24 hours
-    if Rails.configuration.default_trash_lifetime < 86400 then
-      raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime
-    end
-  end
-end
diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake
deleted file mode 100644 (file)
index ed34960..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-namespace :config do
-  desc 'Show site configuration'
-  task dump: :environment do
-    puts $application_config.to_yaml
-  end
-end
index b45113e8a5a93fea183c05f06cd2eaffa1a04501..c5c5cdc76933dc833eb0fb5c8b46995152dc503f 100644 (file)
@@ -11,7 +11,7 @@ namespace :db do
   desc "Remove old container log entries from the logs table"
 
   task delete_old_container_logs: :environment do
-    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.clean_container_log_rows_after} seconds')"
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
 
     ActiveRecord::Base.connection.execute(delete_sql)
   end
index dcd92b19bcc4982b81e036d1fc76298b5fd2a673..3c1c049998377ffe79ac9cb3a2b512d34a6834f9 100644 (file)
@@ -9,7 +9,7 @@
 namespace :db do
   desc "Remove old job stderr entries from the logs table"
   task delete_old_job_logs: :environment do
-    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.clean_job_log_rows_after} seconds')"
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
 
     ActiveRecord::Base.connection.execute(delete_sql)
   end
index 96879629646b7e413e3c1e6c3e2110d7de0d7173..f7faabc4c262c990ee20ee60a9cdc829b1bf8214 100644 (file)
@@ -50,7 +50,7 @@ module Trashable
       if trash_at.nil?
         self.delete_at = nil
       else
-        self.delete_at = trash_at + Rails.configuration.default_trash_lifetime.seconds
+        self.delete_at = trash_at + Rails.configuration.Collections.DefaultTrashLifetime.seconds
       end
     elsif !trash_at || !delete_at || trash_at > delete_at
       # Not trash, or bogus arguments? Just validate in
@@ -65,7 +65,7 @@ module Trashable
       earliest_delete = [
         @validation_timestamp,
         trash_at_was,
-      ].compact.min + Rails.configuration.blob_signature_ttl.seconds
+      ].compact.min + Rails.configuration.Collections.BlobSigningTTL.seconds
 
       # The previous value of delete_at is also an upper bound on the
       # longest-lived permission token. For example, if TTL=14,
@@ -96,7 +96,7 @@ module TrashableController
       @object.update_attributes!(trash_at: db_current_time)
     end
     earliest_delete = (@object.trash_at +
-                       Rails.configuration.blob_signature_ttl.seconds)
+                       Rails.configuration.Collections.BlobSigningTTL.seconds)
     if @object.delete_at > earliest_delete
       @object.update_attributes!(delete_at: earliest_delete)
     end
index ee2b016cd53e29a230ca9b7ba5be3ec86022dfde..cc545b2fd1a92fbb892f3e8a78dc759996cb8b55 100644 (file)
@@ -11,7 +11,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
 
   def permit_unsigned_manifests isok=true
     # Set security model for the life of a test.
-    Rails.configuration.permit_create_collection_with_unsigned_manifest = isok
+    Rails.configuration.Collections.BlobSigning = !isok
   end
 
   def assert_signed_manifest manifest_text, label='', token: false
@@ -24,7 +24,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
         exp = tok[/\+A[[:xdigit:]]+@([[:xdigit:]]+)/, 1].to_i(16)
         sig = Blob.sign_locator(
           bare,
-          key: Rails.configuration.blob_signing_key,
+          key: Rails.configuration.Collections.BlobSigningKey,
           expire: exp,
           api_token: token)[/\+A[^\+]*/, 0]
         assert_includes tok, sig
@@ -88,7 +88,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
       token = api_client_authorizations(:active).send(token_method)
       signed = Blob.sign_locator(
         'acbd18db4cc2f85cedef654fccc4a4d8+3',
-        key: Rails.configuration.blob_signing_key,
+        key: Rails.configuration.Collections.BlobSigningKey,
         api_token: token)
       authorize_with_token token
       put :update, params: {
@@ -221,7 +221,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
   def request_capped_index(params={})
     authorize_with :user1_with_load
     coll1 = collections(:collection_1_of_201)
-    Rails.configuration.max_index_database_read =
+    Rails.configuration.API.MaxIndexDatabaseRead =
       yield(coll1.manifest_text.size)
     get :index, params: {
       select: %w(uuid manifest_text),
@@ -566,7 +566,7 @@ EOS
 
       # Build a manifest with both signed and unsigned locators.
       signing_opts = {
-        key: Rails.configuration.blob_signing_key,
+        key: Rails.configuration.Collections.BlobSigningKey,
         api_token: api_token(:active),
       }
       signed_locators = locators.collect do |x|
@@ -622,7 +622,7 @@ EOS
     # TODO(twp): in phase 4, all locators will need to be signed, so
     # this test should break and will need to be rewritten. Issue #2755.
     signing_opts = {
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: api_token(:active),
       ttl: 3600   # 1 hour
     }
@@ -653,7 +653,7 @@ EOS
   test "create fails with invalid signature" do
     authorize_with :active
     signing_opts = {
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: api_token(:active),
     }
 
@@ -683,7 +683,7 @@ EOS
   test "create fails with uuid of signed manifest" do
     authorize_with :active
     signing_opts = {
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: api_token(:active),
     }
 
@@ -755,7 +755,7 @@ EOS
       ea10d51bcf88862dbcc36eb292017dfd+45)
 
     signing_opts = {
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: api_token(:active),
     }
 
@@ -903,7 +903,7 @@ EOS
 
   [1, 5, nil].each do |ask|
     test "Set replication_desired=#{ask.inspect}" do
-      Rails.configuration.default_collection_replication = 2
+      Rails.configuration.Collections.DefaultReplication = 2
       authorize_with :active
       put :update, params: {
         id: collections(:replication_undesired_unconfirmed).uuid,
@@ -1176,7 +1176,7 @@ EOS
     assert_response 200
     c = Collection.find_by_uuid(uuid)
     assert_operator c.trash_at, :<, db_current_time
-    assert_equal c.delete_at, c.trash_at + Rails.configuration.blob_signature_ttl
+    assert_equal c.delete_at, c.trash_at + Rails.configuration.Collections.BlobSigningTTL
   end
 
   test 'delete long-trashed collection immediately using http DELETE verb' do
@@ -1208,7 +1208,7 @@ EOS
       assert_response 200
       c = Collection.find_by_uuid(uuid)
       assert_operator c.trash_at, :<, db_current_time
-      assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.default_trash_lifetime
+      assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.Collections.DefaultTrashLifetime
     end
   end
 
@@ -1373,8 +1373,8 @@ EOS
   end
 
   test "update collection with versioning enabled" do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 1 # 1 second
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 1 # 1 second
 
     col = collections(:collection_owned_by_active)
     assert_equal 2, col.version
@@ -1383,7 +1383,7 @@ EOS
     token = api_client_authorizations(:active).v2token
     signed = Blob.sign_locator(
       'acbd18db4cc2f85cedef654fccc4a4d8+3',
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: token)
     authorize_with_token token
     put :update, params: {
index b596baaae49ba5c31ff5bd870bd58fa4a77dcf79..d49fe7a3e647caec2215c6f911fe5e37b1d6a5ab 100644 (file)
@@ -108,7 +108,7 @@ class Arvados::V1::FiltersTest < ActionController::TestCase
       format: :json,
       count: 'none',
       limit: 1000,
-      filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+      filters: [['any', '@@', Rails.configuration.ClusterID]],
     }
 
     assert_response :success
@@ -137,7 +137,7 @@ class Arvados::V1::FiltersTest < ActionController::TestCase
       limit: 1000,
       offset: '5',
       last_object_class: 'PipelineInstance',
-      filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+      filters: [['any', '@@', Rails.configuration.ClusterID]],
     }
 
     assert_response :success
index 37b606409ec46b89c67914313f19a6d2d827c6a8..cefb7f383076f27c65cfe3e07b2af97bab8dd9a6 100644 (file)
@@ -431,7 +431,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
   end
 
   test 'get contents with jobs and pipeline instances disabled' do
-    Rails.configuration.disable_api_methods = ['jobs.index', 'pipeline_instances.index']
+    Rails.configuration.API.DisabledAPIs = ['jobs.index', 'pipeline_instances.index']
 
     authorize_with :active
     get :contents, params: {
@@ -444,7 +444,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
   test 'get contents with low max_index_database_read' do
     # Some result will certainly have at least 12 bytes in a
     # restricted column
-    Rails.configuration.max_index_database_read = 12
+    Rails.configuration.API.MaxIndexDatabaseRead = 12
     authorize_with :active
     get :contents, params: {
           id: groups(:aproject).uuid,
index fb81f23636c84f79086a88a3a4e00733745b8572..b3e10bf4a4fc38eff96d181ecc77715ad0512a6f 100644 (file)
@@ -83,7 +83,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
     # We need to verify that "cancel" creates a trigger file, so first
     # let's make sure there is no stale trigger file.
     begin
-      File.unlink(Rails.configuration.crunch_refresh_trigger)
+      File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger)
     rescue Errno::ENOENT
     end
 
@@ -105,7 +105,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
                  'server should correct bogus cancelled_at ' +
                  job['cancelled_at'])
     assert_equal(true,
-                 File.exist?(Rails.configuration.crunch_refresh_trigger),
+                 File.exist?(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger),
                  'trigger file should be created when job is cancelled')
   end
 
@@ -123,7 +123,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
       # We need to verify that "cancel" creates a trigger file, so first
       # let's make sure there is no stale trigger file.
       begin
-        File.unlink(Rails.configuration.crunch_refresh_trigger)
+        File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger)
       rescue Errno::ENOENT
       end
 
@@ -144,7 +144,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
     # We need to verify that "cancel" creates a trigger file, so first
     # let's make sure there is no stale trigger file.
     begin
-      File.unlink(Rails.configuration.crunch_refresh_trigger)
+      File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger)
     rescue Errno::ENOENT
     end
 
@@ -480,7 +480,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
   end
 
   test 'jobs.create disabled in config' do
-    Rails.configuration.disable_api_methods = ["jobs.create",
+    Rails.configuration.API.DisabledAPIs = ["jobs.create",
                                                "pipeline_instances.create"]
     authorize_with :active
     post :create, params: {
index 0beff6882acc1b688614097891ef1f655efa1948..c61a57ecc835212860dfdc18e5018b1d07428bae 100644 (file)
@@ -223,7 +223,7 @@ class Arvados::V1::NodesControllerTest < ActionController::TestCase
   end
 
   test "node should fail ping with invalid hostname config format" do
-    Rails.configuration.assign_node_hostname = 'compute%<slot_number>04'  # should end with "04d"
+    Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%<slot_number>04'  # should end with "04d"
     post :ping, params: {
       id: nodes(:new_with_no_hostname).uuid,
       ping_secret: nodes(:new_with_no_hostname).info['ping_secret'],
index b810d69939b30208a1f944981e53bd6ad995fbe3..537fe525270333317cf6ef1fb77c53a6c035dce2 100644 (file)
@@ -200,15 +200,15 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
   end
 
   [
-    {cfg: :git_repo_ssh_base, cfgval: "git@example.com:", match: %r"^git@example.com:"},
-    {cfg: :git_repo_ssh_base, cfgval: true, match: %r"^git@git.zzzzz.arvadosapi.com:"},
-    {cfg: :git_repo_ssh_base, cfgval: false, refute: /^git@/ },
-    {cfg: :git_repo_https_base, cfgval: "https://example.com/", match: %r"^https://example.com/"},
-    {cfg: :git_repo_https_base, cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"},
-    {cfg: :git_repo_https_base, cfgval: false, refute: /^http/ },
+    {cfg: "GitSSH.ExternalURL", cfgval: URI("ssh://git@example.com"), match: %r"^git@example.com:"},
+    {cfg: "GitSSH.ExternalURL", cfgval: URI(""), match: %r"^git@git.zzzzz.arvadosapi.com:"},
+    {cfg: "GitSSH", cfgval: false, refute: /^git@/ },
+    {cfg: "GitHTTP.ExternalURL", cfgval: URI("https://example.com/"), match: %r"^https://example.com/"},
+    {cfg: "GitHTTP.ExternalURL", cfgval: URI(""), match: %r"^https://git.zzzzz.arvadosapi.com/"},
+    {cfg: "GitHTTP", cfgval: false, refute: /^http/ },
   ].each do |expect|
     test "set #{expect[:cfg]} to #{expect[:cfgval]}" do
-      Rails.configuration.send expect[:cfg].to_s+"=", expect[:cfgval]
+      ConfigLoader.set_cfg Rails.configuration.Services, expect[:cfg].to_s, expect[:cfgval]
       authorize_with :active
       get :index
       assert_response :success
index 53c1ed72e7910c8cd19f36f0721aef5e53ea87ed..e62faa3314e3a3bed9fd1fa207d58ea73c75d128 100644 (file)
@@ -29,12 +29,12 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
     assert_response :success
     discovery_doc = JSON.parse(@response.body)
     assert_includes discovery_doc, 'defaultTrashLifetime'
-    assert_equal discovery_doc['defaultTrashLifetime'], Rails.application.config.default_trash_lifetime
+    assert_equal discovery_doc['defaultTrashLifetime'], Rails.configuration.Collections.DefaultTrashLifetime
     assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
     assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])
     assert_match(/^unknown$/, discovery_doc['packageVersion'])
-    assert_equal discovery_doc['websocketUrl'], Rails.application.config.websocket_address
-    assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address
+    assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services.Websocket.ExternalURL.to_s
+    assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services.Workbench1.ExternalURL.to_s
     assert_equal('zzzzz', discovery_doc['uuidPrefix'])
   end
 
@@ -65,7 +65,7 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
   end
 
   test "non-empty disable_api_methods" do
-    Rails.configuration.disable_api_methods =
+    Rails.configuration.API.DisabledAPIs =
       ['jobs.create', 'pipeline_instances.create', 'pipeline_templates.create']
     get :index
     assert_response :success
index 22a44a97ab1bd33ee27da724e1a01665ae8fa06d..0501da1673ebdff87c5c9900b205dfdde96dce42 100644 (file)
@@ -638,12 +638,12 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     setup_email = ActionMailer::Base.deliveries.last
     assert_not_nil setup_email, 'Expected email after setup'
 
-    assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0]
+    assert_equal Rails.configuration.Users.UserNotifierEmailFrom, setup_email.from[0]
     assert_equal 'foo@example.com', setup_email.to[0]
     assert_equal 'Welcome to Arvados - shell account enabled', setup_email.subject
     assert (setup_email.body.to_s.include? 'Your Arvados shell account has been set up'),
         'Expected Your Arvados shell account has been set up in email body'
-    assert (setup_email.body.to_s.include? "#{Rails.configuration.workbench_address}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body'
+    assert (setup_email.body.to_s.include? "#{Rails.configuration.Services.Workbench1.ExternalURL}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body'
   end
 
   test "setup inactive user by changing is_active to true" do
index 170b59ee1e10d833fd3a0cb0fb6a6aef87bbc123..03189bdfeae50c808a48069338824772513ae541 100644 (file)
@@ -19,14 +19,14 @@ module GitTestHelper
   def self.included base
     base.setup do
       # Extract the test repository data into the default test
-      # environment's Rails.configuration.git_repositories_dir. (We
+      # environment's Rails.configuration.Git.Repositories. (We
       # don't use that config setting here, though: it doesn't seem
       # worth the risk of stepping on a real git repo root.)
       @tmpdir = Rails.root.join 'tmp', 'git'
       FileUtils.mkdir_p @tmpdir
       system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar")
-      Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
-      Rails.configuration.git_internal_dir = "#{@tmpdir}/internal.git"
+      Rails.configuration.Git.Repositories = "#{@tmpdir}/test"
+      Rails.configuration.Containers.JobsAPI.GitInternalDir = "#{@tmpdir}/internal.git"
     end
 
     base.teardown do
@@ -37,7 +37,7 @@ module GitTestHelper
   end
 
   def internal_tag tag
-    IO.read "|git --git-dir #{Rails.configuration.git_internal_dir.shellescape} log --format=format:%H -n1 #{tag.shellescape}"
+    IO.read "|git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape} log --format=format:%H -n1 #{tag.shellescape}"
   end
 
   # Intercept fetch_remote_repository and fetch from a specified url
index 70965753425c8103963dd534fb3768dae85964e5..ab1a3e69de4a10ac6d623769ca126b6cf6909b78 100644 (file)
@@ -129,7 +129,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
 
   test "store collection as json" do
     signing_opts = {
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: api_token(:active),
     }
     signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
@@ -146,7 +146,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
 
   test "store collection with manifest_text only" do
     signing_opts = {
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: api_token(:active),
     }
     signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
@@ -163,7 +163,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
 
   test "store collection then update name" do
     signing_opts = {
-      key: Rails.configuration.blob_signing_key,
+      key: Rails.configuration.Collections.BlobSigningKey,
       api_token: api_token(:active),
     }
     signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
index e45dd4eb5213954631314dd3fdf57cf4ebc44beb..eb97fc1f49034165e6ae02a1896b116a3e835890 100644 (file)
@@ -194,7 +194,7 @@ class NonTransactionalGroupsTest < ActionDispatch::IntegrationTest
   end
 
   test "create request with async=true defers permissions update" do
-    Rails.configuration.async_permissions_update_interval = 1 # second
+    Rails.configuration.API.AsyncPermissionsUpdateInterval = 1 # second
     name = "Random group #{rand(1000)}"
     assert_equal nil, Group.find_by_name(name)
 
index 5c09cf1bcc6e274eb713b2cf7988263d53020790..90a55865397cefb6ce7e0829eca626a21734c2fc 100644 (file)
@@ -63,8 +63,8 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
     ready.pop
     @remote_server = srv
     @remote_host = "127.0.0.1:#{srv.config[:Port]}"
-    Rails.configuration.remote_hosts = Rails.configuration.remote_hosts.merge({'zbbbb' => @remote_host,
-                                                                               'zbork' => @remote_host})
+    Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({zbbbb: ActiveSupport::InheritableOptions.new({Host: @remote_host}),
+                                                                                   zbork: ActiveSupport::InheritableOptions.new({Host: @remote_host})})
     Arvados::V1::SchemaController.any_instance.stubs(:root_url).returns "https://#{@remote_host}"
     @stub_status = 200
     @stub_content = {
@@ -243,7 +243,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
   end
 
   test 'auto-activate user from trusted cluster' do
-    Rails.configuration.auto_activate_users_from = ['zbbbb']
+    Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = true
     get '/arvados/v1/users/current',
       params: {format: 'json'},
       headers: auth(remote: 'zbbbb')
index f2dbaa5069528f50e6d37c0d441c1d6c0f797878..fdb8628c5d1377abfc9a2273c27d3b254265240d 100644 (file)
@@ -111,10 +111,10 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
    ].each do |testcase|
     test "user auto-activate #{testcase.inspect}" do
       # Configure auto_setup behavior according to testcase[:cfg]
-      Rails.configuration.auto_setup_new_users = testcase[:cfg][:auto]
-      Rails.configuration.auto_setup_new_users_with_vm_uuid =
-        (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : false)
-      Rails.configuration.auto_setup_new_users_with_repository =
+      Rails.configuration.Users.AutoSetupNewUsers = testcase[:cfg][:auto]
+      Rails.configuration.Users.AutoSetupNewUsersWithVmUUID =
+        (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : "")
+      Rails.configuration.Users.AutoSetupNewUsersWithRepository =
         testcase[:cfg][:repo]
 
       mock_auth_with(email: testcase[:email])
index 45278ac1aaa25fafb24270172b48a878fe4741b5..c81b331f24c7e20ae354a7ddc59255f3fb3ef125 100644 (file)
@@ -16,7 +16,7 @@ class DeleteOldContainerLogsTaskTest < ActiveSupport::TestCase
   end
 
   def run_with_expiry(clean_after)
-    Rails.configuration.clean_container_log_rows_after = clean_after
+    Rails.configuration.Containers.Logging.MaxAge = clean_after
     Rake::Task[TASK_NAME].reenable
     Rake.application.invoke_task TASK_NAME
   end
index 4d4cdbc9e5bea227406d1c7975a173d01b1eb88a..00660431c360800d5a63937cad81d1629cac1338 100644 (file)
@@ -16,7 +16,7 @@ class DeleteOldJobLogsTaskTest < ActiveSupport::TestCase
   end
 
   def run_with_expiry(clean_after)
-    Rails.configuration.clean_job_log_rows_after = clean_after
+    Rails.configuration.Containers.Logging.MaxAge = clean_after
     Rake::Task[TASK_NAME].reenable
     Rake.application.invoke_task TASK_NAME
   end
index 939242cf8e70eca87be21abac8ac1ef94c2a0a9b..5747a85cf598965d20b563c918a304b01f9dce87 100644 (file)
@@ -99,11 +99,8 @@ class ActiveSupport::TestCase
 
   def restore_configuration
     # Restore configuration settings changed during tests
-    $application_config.each do |k,v|
-      if k.match(/^[^.]*$/)
-        Rails.configuration.send (k + '='), v
-      end
-    end
+    ConfigLoader.copy_into_config $arvados_config, Rails.configuration
+    ConfigLoader.copy_into_config $remaining_config, Rails.configuration
   end
 
   def set_user_from_auth(auth_name)
index 429ebde97620ab0934b9fe347058f3986f27e704..293e28e4fa03e5275e22123b0ceea6cda6ab716e 100644 (file)
@@ -130,14 +130,14 @@ class BlobTest < ActiveSupport::TestCase
       expire: 0x7fffffff,
     }
 
-    original_ttl = Rails.configuration.blob_signature_ttl
-    Rails.configuration.blob_signature_ttl = original_ttl*2
+    original_ttl = Rails.configuration.Collections.BlobSigningTTL
+    Rails.configuration.Collections.BlobSigningTTL = original_ttl*2
     signed2 = Blob.sign_locator @@known_locator, {
       api_token: @@known_token,
       key: @@known_key,
       expire: 0x7fffffff,
     }
-    Rails.configuration.blob_signature_ttl = original_ttl
+    Rails.configuration.Collections.BlobSigningTTL = original_ttl
 
     assert_not_equal signed, signed2
   end
index 8deedee0186ea5bbd87ba6d219d7ef4d47f66314..477f9e27505200b3e080ae29b8362151adb6e21d 100644 (file)
@@ -157,8 +157,8 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test "auto-create version after idle setting" do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 600 # 10 minutes
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 600 # 10 minutes
     act_as_user users(:active) do
       # Set up initial collection
       c = create_collection 'foo', Encoding::US_ASCII
@@ -188,8 +188,8 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test "preserve_version=false assignment is ignored while being true and not producing a new version" do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 3600
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 3600
     act_as_user users(:active) do
       # Set up initial collection
       c = create_collection 'foo', Encoding::US_ASCII
@@ -244,8 +244,8 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test "uuid updates on current version make older versions update their pointers" do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 0
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 0
     act_as_system_user do
       # Set up initial collection
       c = create_collection 'foo', Encoding::US_ASCII
@@ -267,8 +267,8 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test "older versions' modified_at indicate when they're created" do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 0
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 0
     act_as_user users(:active) do
       # Set up initial collection
       c = create_collection 'foo', Encoding::US_ASCII
@@ -301,8 +301,8 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test "past versions should not be directly updatable" do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 0
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 0
     act_as_system_user do
       # Set up initial collection
       c = create_collection 'foo', Encoding::US_ASCII
@@ -324,7 +324,7 @@ class CollectionTest < ActiveSupport::TestCase
       assert c_old.invalid?
       c_old.reload
       # Now disable collection versioning, it should behave the same way
-      Rails.configuration.collection_versioning = false
+      Rails.configuration.Collections.CollectionVersioning = false
       c_old.name = 'this was foo'
       assert c_old.invalid?
     end
@@ -337,8 +337,8 @@ class CollectionTest < ActiveSupport::TestCase
     ['is_trashed', true, false],
   ].each do |attr, first_val, second_val|
     test "sync #{attr} with older versions" do
-      Rails.configuration.collection_versioning = true
-      Rails.configuration.preserve_version_if_idle = 0
+      Rails.configuration.Collections.CollectionVersioning = true
+      Rails.configuration.Collections.PreserveVersionIfIdle = 0
       act_as_system_user do
         # Set up initial collection
         c = create_collection 'foo', Encoding::US_ASCII
@@ -379,8 +379,8 @@ class CollectionTest < ActiveSupport::TestCase
     [false, 'replication_desired', 5, false],
   ].each do |versioning, attr, val, new_version_expected|
     test "update #{attr} with versioning #{versioning ? '' : 'not '}enabled should #{new_version_expected ? '' : 'not '}create a new version" do
-      Rails.configuration.collection_versioning = versioning
-      Rails.configuration.preserve_version_if_idle = 0
+      Rails.configuration.Collections.CollectionVersioning = versioning
+      Rails.configuration.Collections.PreserveVersionIfIdle = 0
       act_as_user users(:active) do
         # Create initial collection
         c = create_collection 'foo', Encoding::US_ASCII
@@ -414,8 +414,8 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test 'current_version_uuid is ignored during update' do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 0
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 0
     act_as_user users(:active) do
       # Create 1st collection
       col1 = create_collection 'foo', Encoding::US_ASCII
@@ -439,8 +439,8 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test 'with versioning enabled, simultaneous updates increment version correctly' do
-    Rails.configuration.collection_versioning = true
-    Rails.configuration.preserve_version_if_idle = 0
+    Rails.configuration.Collections.CollectionVersioning = true
+    Rails.configuration.Collections.PreserveVersionIfIdle = 0
     act_as_user users(:active) do
       # Create initial collection
       col = create_collection 'foo', Encoding::US_ASCII
@@ -654,7 +654,7 @@ class CollectionTest < ActiveSupport::TestCase
 
   [0, 2, 4, nil].each do |ask|
     test "set replication_desired to #{ask.inspect}" do
-      Rails.configuration.default_collection_replication = 2
+      Rails.configuration.Collections.DefaultReplication = 2
       act_as_user users(:active) do
         c = collections(:replication_undesired_unconfirmed)
         c.update_attributes replication_desired: ask
@@ -760,7 +760,7 @@ class CollectionTest < ActiveSupport::TestCase
                              name: 'foo',
                              trash_at: db_current_time + 1.years)
       sig_exp = /\+A[0-9a-f]{40}\@([0-9]+)/.match(c.signed_manifest_text)[1].to_i
-      expect_max_sig_exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+      expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
       assert_operator c.trash_at.to_i, :>, expect_max_sig_exp
       assert_operator sig_exp.to_i, :<=, expect_max_sig_exp
     end
@@ -849,7 +849,7 @@ class CollectionTest < ActiveSupport::TestCase
     test test_name do
       act_as_user users(:active) do
         min_exp = (db_current_time +
-                   Rails.configuration.blob_signature_ttl.seconds)
+                   Rails.configuration.Collections.BlobSigningTTL.seconds)
         if fixture_name == :expired_collection
           # Fixture-finder shorthand doesn't find trashed collections
           # because they're not in the default scope.
@@ -890,7 +890,7 @@ class CollectionTest < ActiveSupport::TestCase
   end
 
   test 'default trash interval > blob signature ttl' do
-    Rails.configuration.default_trash_lifetime = 86400 * 21 # 3 weeks
+    Rails.configuration.Collections.DefaultTrashLifetime = 86400 * 21 # 3 weeks
     start = db_current_time
     act_as_user users(:active) do
       c = Collection.create!(manifest_text: '', name: 'foo')
index af365b19e2e224b70946982cabe59c2c2fd8cb77..c5d72c3bfea7ef21cc93a5a8d88db4f564008601 100644 (file)
@@ -78,7 +78,7 @@ class CommitTest < ActiveSupport::TestCase
 
   test 'tag_in_internal_repository creates and updates tags in internal.git' do
     authorize_with :active
-    gitint = "git --git-dir #{Rails.configuration.git_internal_dir}"
+    gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir}"
     IO.read("|#{gitint} tag -d testtag 2>/dev/null") # "no such tag", fine
     assert_match(/^fatal: /, IO.read("|#{gitint} show testtag 2>&1"))
     refute $?.success?
@@ -88,7 +88,7 @@ class CommitTest < ActiveSupport::TestCase
   end
 
   def with_foo_repository
-    Dir.chdir("#{Rails.configuration.git_repositories_dir}/#{repositories(:foo).uuid}") do
+    Dir.chdir("#{Rails.configuration.Git.Repositories}/#{repositories(:foo).uuid}") do
       must_pipe("git checkout master 2>&1")
       yield
     end
@@ -107,7 +107,7 @@ class CommitTest < ActiveSupport::TestCase
       must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
     end
     Commit.tag_in_internal_repository 'active/foo', sha1, tag
-    gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+    gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}"
     assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
     assert $?.success?
   end
@@ -123,7 +123,7 @@ class CommitTest < ActiveSupport::TestCase
       must_pipe("git reset --hard HEAD^")
     end
     Commit.tag_in_internal_repository 'active/foo', sha1, tag
-    gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+    gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}"
     assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
     assert $?.success?
   end
index 5c4a56c2c5f28200104ad5b7b8c78624fafb43ee..0dad6ee75ccf64b6484bf8da202b780932ff7a95 100644 (file)
@@ -252,6 +252,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     output = Collection.find_by_uuid cr.output_uuid
     assert_equal output_pdh, output.portable_data_hash
     assert_equal output.owner_uuid, project.uuid, "Container output should be copied to #{project.uuid}"
+    assert_not_nil output.modified_at
 
     log = Collection.find_by_uuid cr.log_uuid
     assert_equal log.manifest_text, ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
@@ -514,7 +515,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   test "Container.resolve_container_image(pdh)" do
     set_user_from_auth :active
     [[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|
-      Rails.configuration.docker_image_formats = [ver]
+      Rails.configuration.Containers.SupportedDockerImageFormats = [ver]
       pdh = collections(coll).portable_data_hash
       resolved = Container.resolve_container_image(pdh)
       assert_equal resolved, pdh
@@ -535,12 +536,12 @@ class ContainerRequestTest < ActiveSupport::TestCase
 
   test "allow unrecognized container when there are remote_hosts" do
     set_user_from_auth :active
-    Rails.configuration.remote_hosts = {"foooo" => "bar.com"}
+    Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({foooo: ActiveSupport::InheritableOptions.new({Host: "bar.com"})})
     Container.resolve_container_image('acbd18db4cc2f85cedef654fccc4a4d8+3')
   end
 
   test "migrated docker image" do
-    Rails.configuration.docker_image_formats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
     add_docker19_migration_link
 
     # Test that it returns only v2 images even though request is for v1 image.
@@ -558,7 +559,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "use unmigrated docker image" do
-    Rails.configuration.docker_image_formats = ['v1']
+    Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
     add_docker19_migration_link
 
     # Test that it returns only supported v1 images even though there is a
@@ -577,7 +578,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "incompatible docker image v1" do
-    Rails.configuration.docker_image_formats = ['v1']
+    Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
     add_docker19_migration_link
 
     # Don't return unsupported v2 image even if we ask for it directly.
@@ -590,7 +591,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
   end
 
   test "incompatible docker image v2" do
-    Rails.configuration.docker_image_formats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
     # No migration link, don't return unsupported v1 image,
 
     set_user_from_auth :active
@@ -836,7 +837,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     assert_not_nil(trash)
     assert_not_nil(delete)
     assert_in_delta(trash, now + 1.second, 10)
-    assert_in_delta(delete, now + Rails.configuration.blob_signature_ttl.second, 10)
+    assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL.second, 10)
   end
 
   def check_output_ttl_1y(now, trash, delete)
@@ -884,7 +885,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
     [false, ActiveRecord::RecordInvalid],
     [true, nil],
   ].each do |preemptible_conf, expected|
-    test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
+    test "having Rails.configuration.Containers.UsePreemptibleInstances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
       sp = {"preemptible" => true}
       common_attrs = {cwd: "test",
                       priority: 1,
@@ -892,7 +893,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
                       output_path: "test",
                       scheduling_parameters: sp,
                       mounts: {"test" => {"kind" => "json"}}}
-      Rails.configuration.preemptible_instances = preemptible_conf
+      Rails.configuration.Containers.UsePreemptibleInstances = preemptible_conf
       set_user_from_auth :active
 
       cr = create_minimal_req!(common_attrs)
@@ -921,7 +922,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
                       scheduling_parameters: {"preemptible" => false},
                       mounts: {"test" => {"kind" => "json"}}}
 
-      Rails.configuration.preemptible_instances = true
+      Rails.configuration.Containers.UsePreemptibleInstances = true
       set_user_from_auth :active
 
       if requesting_c
@@ -946,14 +947,14 @@ class ContainerRequestTest < ActiveSupport::TestCase
     [false, 'zzzzz-dz642-runningcontainr', nil],
     [false, nil, nil],
   ].each do |preemptible_conf, requesting_c, schedule_preemptible|
-    test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
+    test "having Rails.configuration.Containers.UsePreemptibleInstances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
       common_attrs = {cwd: "test",
                       priority: 1,
                       command: ["echo", "hello"],
                       output_path: "test",
                       mounts: {"test" => {"kind" => "json"}}}
 
-      Rails.configuration.preemptible_instances = preemptible_conf
+      Rails.configuration.Containers.UsePreemptibleInstances = preemptible_conf
       set_user_from_auth :active
 
       if requesting_c
@@ -1017,7 +1018,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
                     state: ContainerRequest::Committed,
                     mounts: {"test" => {"kind" => "json"}}}
     set_user_from_auth :active
-    Rails.configuration.preemptible_instances = true
+    Rails.configuration.Containers.UsePreemptibleInstances = true
 
     cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do
       create_minimal_req!(common_attrs)
index 5ce3739a36dc7ca5d4002019d36144b90a584da0..88fd5feb6ad27c3c55dd5531c0a2566422239f41 100644 (file)
@@ -184,7 +184,7 @@ class ContainerTest < ActiveSupport::TestCase
     assert_equal c1.runtime_status, {}
 
     assert_equal Container::Queued, c1.state
-    assert_raises ActiveRecord::RecordInvalid do
+    assert_raises ArvadosModel::PermissionDeniedError do
       c1.update_attributes! runtime_status: {'error' => 'Oops!'}
     end
 
@@ -241,7 +241,7 @@ class ContainerTest < ActiveSupport::TestCase
   end
 
   test "find_reusable method should select higher priority queued container" do
-        Rails.configuration.log_reuse_decisions = true
+        Rails.configuration.Containers.LogReuseDecisions = true
     set_user_from_auth :active
     common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{"var" => "queued"}})
     c_low_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:1}))
@@ -511,7 +511,7 @@ class ContainerTest < ActiveSupport::TestCase
 
   test "find_reusable with logging enabled" do
     set_user_from_auth :active
-    Rails.configuration.log_reuse_decisions = true
+    Rails.configuration.Containers.LogReuseDecisions = true
     Rails.logger.expects(:info).at_least(3)
     Container.find_reusable(REUSABLE_COMMON_ATTRS)
   end
@@ -666,7 +666,7 @@ class ContainerTest < ActiveSupport::TestCase
   end
 
   test "Exceed maximum lock-unlock cycles" do
-    Rails.configuration.max_container_dispatch_attempts = 3
+    Rails.configuration.Containers.MaxDispatchAttempts = 3
 
     set_user_from_auth :active
     c, cr = minimal_new
@@ -777,6 +777,51 @@ class ContainerTest < ActiveSupport::TestCase
     end
   end
 
+  [
+    [Container::Queued, {state: Container::Locked}],
+    [Container::Queued, {state: Container::Running}],
+    [Container::Queued, {state: Container::Complete}],
+    [Container::Queued, {state: Container::Cancelled}],
+    [Container::Queued, {priority: 123456789}],
+    [Container::Queued, {runtime_status: {'error' => 'oops'}}],
+    [Container::Queued, {cwd: '/'}],
+    [Container::Locked, {state: Container::Running}],
+    [Container::Locked, {state: Container::Queued}],
+    [Container::Locked, {priority: 123456789}],
+    [Container::Locked, {runtime_status: {'error' => 'oops'}}],
+    [Container::Locked, {cwd: '/'}],
+    [Container::Running, {state: Container::Complete}],
+    [Container::Running, {state: Container::Cancelled}],
+    [Container::Running, {priority: 123456789}],
+    [Container::Running, {runtime_status: {'error' => 'oops'}}],
+    [Container::Running, {cwd: '/'}],
+    [Container::Complete, {state: Container::Cancelled}],
+    [Container::Complete, {priority: 123456789}],
+    [Container::Complete, {runtime_status: {'error' => 'oops'}}],
+    [Container::Complete, {cwd: '/'}],
+    [Container::Cancelled, {cwd: '/'}],
+  ].each do |start_state, updates|
+    test "Container update #{updates.inspect} when #{start_state} forbidden for non-admin" do
+      set_user_from_auth :active
+      c, _ = minimal_new
+      if start_state != Container::Queued
+        set_user_from_auth :dispatch1
+        c.lock
+        if start_state != Container::Locked
+          c.update_attributes! state: Container::Running
+          if start_state != Container::Running
+            c.update_attributes! state: start_state
+          end
+        end
+      end
+      assert_equal c.state, start_state
+      set_user_from_auth :active
+      assert_raises(ArvadosModel::PermissionDeniedError) do
+        c.update_attributes! updates
+      end
+    end
+  end
+
   test "Container only set exit code on complete" do
     set_user_from_auth :active
     c, _ = minimal_new
@@ -899,7 +944,9 @@ class ContainerTest < ActiveSupport::TestCase
     c.update_attributes! state: Container::Running
 
     set_user_from_auth :running_to_be_deleted_container_auth
-    refute c.update_attributes(output: collections(:foo_file).portable_data_hash)
+    assert_raises(ArvadosModel::PermissionDeniedError) do
+      c.update_attributes(output: collections(:foo_file).portable_data_hash)
+    end
   end
 
   test "can set trashed output on running container" do
index 42ef0d160cb1c595f781858cb9137a79d88e1da0..3a8f90a66b6705da6359cef0d59b49ef8db61996 100644 (file)
@@ -99,7 +99,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase
 
   test 'override --cgroup-root with CRUNCH_CGROUP_ROOT' do
     ENV['CRUNCH_CGROUP_ROOT'] = '/path/to/cgroup'
-    Rails.configuration.crunch_job_wrapper = :none
+    Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "none"
     act_as_system_user do
       j = Job.create(repository: 'active/foo',
                      script: 'hash',
@@ -140,7 +140,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase
 
   test 'rate limit of partial line segments' do
     act_as_system_user do
-      Rails.configuration.crunch_log_partial_line_throttle_period = 1
+      Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod = 1
 
       job = {}
       job[:bytes_logged] = 0
@@ -197,7 +197,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase
   end
 
   test 'scancel orphaned job nodes' do
-    Rails.configuration.crunch_job_wrapper = :slurm_immediate
+    Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "slurm_immediate"
     act_as_system_user do
       dispatch = CrunchDispatch.new
 
index 3c7f9a9096538586bcaba5fb52c6b88bc46e11f3..304335c6f08ada4f7832843b9a8d8a1349d7fddf 100644 (file)
@@ -40,8 +40,8 @@ class FailJobsTest < ActiveSupport::TestCase
   end
 
   test 'cancel slurm jobs' do
-    Rails.configuration.crunch_job_wrapper = :slurm_immediate
-    Rails.configuration.crunch_job_user = 'foobar'
+    Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "slurm_immediate"
+    Rails.configuration.Containers.JobsAPI.CrunchJobUser = 'foobar'
     fake_squeue = IO.popen("echo #{@job[:before_reboot].uuid}")
     fake_scancel = IO.popen("true")
     IO.expects(:popen).
@@ -55,7 +55,7 @@ class FailJobsTest < ActiveSupport::TestCase
   end
 
   test 'use reboot time' do
-    Rails.configuration.crunch_job_wrapper = nil
+    Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = nil
     @dispatch.expects(:open).once.with('/proc/stat').
       returns open(Rails.root.join('test/fixtures/files/proc_stat'))
     @dispatch.fail_jobs(before: 'reboot')
index fcbd1722f38f4aff02ac4f38be8cfa7b61c87a0b..f47a1c10f9b0d1b9d9cf6031c3d5b87446a9a29b 100644 (file)
@@ -90,7 +90,7 @@ class JobTest < ActiveSupport::TestCase
   ].each do |use_config|
     test "Job with no Docker image uses default docker image when configuration is set #{use_config}" do
       default_docker_image = collections(:docker_image)[:portable_data_hash]
-      Rails.configuration.default_docker_image_for_jobs = default_docker_image if use_config
+      Rails.configuration.Containers.JobsAPI.DefaultDockerImage = default_docker_image if use_config
 
       job = Job.new job_attrs
       assert job.valid?, job.errors.full_messages.to_s
@@ -127,10 +127,10 @@ class JobTest < ActiveSupport::TestCase
     'locator' => BAD_COLLECTION,
   }.each_pair do |spec_type, image_spec|
     test "Job validation fails with nonexistent Docker image #{spec_type}" do
-      Rails.configuration.remote_hosts = {}
+      Rails.configuration.RemoteClusters = {}
       job = Job.new job_attrs(runtime_constraints:
                               {'docker_image' => image_spec})
-      assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
+      assert(job.invalid?, "nonexistent Docker image #{spec_type} #{image_spec} was valid")
     end
   end
 
@@ -426,7 +426,7 @@ class JobTest < ActiveSupport::TestCase
   end
 
   test "use migrated docker image if requesting old-format image by tag" do
-    Rails.configuration.docker_image_formats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
     add_docker19_migration_link
     job = Job.create!(
       job_attrs(
@@ -438,7 +438,7 @@ class JobTest < ActiveSupport::TestCase
   end
 
   test "use migrated docker image if requesting old-format image by pdh" do
-    Rails.configuration.docker_image_formats = ['v2']
+    Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
     add_docker19_migration_link
     job = Job.create!(
       job_attrs(
@@ -455,7 +455,7 @@ class JobTest < ActiveSupport::TestCase
    [:docker_image_1_12, :docker_image_1_12, :docker_image_1_12],
   ].each do |existing_image, request_image, expect_image|
     test "if a #{existing_image} job exists, #{request_image} yields #{expect_image} after migration" do
-      Rails.configuration.docker_image_formats = ['v1']
+      Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
 
       if existing_image == :docker_image
         oldjob = Job.create!(
@@ -477,7 +477,7 @@ class JobTest < ActiveSupport::TestCase
         end
       end
 
-      Rails.configuration.docker_image_formats = ['v2']
+      Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
       add_docker19_migration_link
 
       # Check that both v1 and v2 images get resolved to v2.
@@ -568,7 +568,7 @@ class JobTest < ActiveSupport::TestCase
   end
 
   test 'find_reusable with logging' do
-    Rails.configuration.log_reuse_decisions = true
+    Rails.configuration.Containers.LogReuseDecisions = true
     Rails.logger.expects(:info).at_least(3)
     try_find_reusable
   end
@@ -595,7 +595,7 @@ class JobTest < ActiveSupport::TestCase
     assert_nil Job.find_reusable(example_attrs, {}, [], [users(:active)])
 
     # ...unless config says to reuse the earlier job in such cases.
-    Rails.configuration.reuse_job_if_outputs_differ = true
+    Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer = true
     j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
     assert_equal foobar.uuid, j.uuid
   end
@@ -648,33 +648,32 @@ class JobTest < ActiveSupport::TestCase
   end
 
   test 'enable legacy api configuration option = true' do
-    Rails.configuration.enable_legacy_jobs_api = true
+    Rails.configuration.Containers.JobsAPI.Enable = "true"
     check_enable_legacy_jobs_api
-    assert_equal [], Rails.configuration.disable_api_methods
+    assert_equal [], Rails.configuration.API.DisabledAPIs
   end
 
   test 'enable legacy api configuration option = false' do
-    Rails.configuration.enable_legacy_jobs_api = false
+    Rails.configuration.Containers.JobsAPI.Enable = "false"
     check_enable_legacy_jobs_api
-    assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+    assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
   end
 
   test 'enable legacy api configuration option = auto, has jobs' do
-    Rails.configuration.enable_legacy_jobs_api = "auto"
+    Rails.configuration.Containers.JobsAPI.Enable = "auto"
     assert Job.count > 0
-    assert_equal [], Rails.configuration.disable_api_methods
     check_enable_legacy_jobs_api
-    assert_equal [], Rails.configuration.disable_api_methods
+    assert_equal [], Rails.configuration.API.DisabledAPIs
   end
 
   test 'enable legacy api configuration option = auto, no jobs' do
-    Rails.configuration.enable_legacy_jobs_api = "auto"
+    Rails.configuration.Containers.JobsAPI.Enable = "auto"
     act_as_system_user do
       Job.destroy_all
     end
     assert_equal 0, Job.count
-    assert_equal [], Rails.configuration.disable_api_methods
+    assert_equal [], Rails.configuration.API.DisabledAPIs
     check_enable_legacy_jobs_api
-    assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+    assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
   end
 end
index 5a78f25235047f485a5416aefe818dec6207bd85..8a878ada91a9b9f0afe6da60f60036eaca3450e6 100644 (file)
@@ -282,7 +282,7 @@ class LogTest < ActiveSupport::TestCase
   end
 
   test "non-empty configuration.unlogged_attributes" do
-    Rails.configuration.unlogged_attributes = ["manifest_text"]
+    Rails.configuration.AuditLogs.UnloggedAttributes = ["manifest_text"]
     txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
 
     act_as_system_user do
@@ -297,7 +297,7 @@ class LogTest < ActiveSupport::TestCase
   end
 
   test "empty configuration.unlogged_attributes" do
-    Rails.configuration.unlogged_attributes = []
+    Rails.configuration.AuditLogs.UnloggedAttributes = []
     txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
 
     act_as_system_user do
@@ -332,8 +332,8 @@ class LogTest < ActiveSupport::TestCase
   test 'retain old audit logs with default settings' do
     assert_no_logs_deleted do
       AuditLogs.delete_old(
-        max_age: Rails.configuration.max_audit_log_age,
-        max_batch: Rails.configuration.max_audit_log_delete_batch)
+        max_age: Rails.configuration.AuditLogs.MaxAge,
+        max_batch: Rails.configuration.AuditLogs.MaxDeleteBatch)
     end
   end
 
@@ -362,8 +362,8 @@ class LogTest < ActiveSupport::TestCase
 
   test 'delete old audit logs in thread' do
     begin
-      Rails.configuration.max_audit_log_age = 20
-      Rails.configuration.max_audit_log_delete_batch = 100000
+      Rails.configuration.AuditLogs.MaxAge = 20
+      Rails.configuration.AuditLogs.MaxDeleteBatch = 100000
       Rails.cache.delete 'AuditLogs'
       initial_log_count = Log.unscoped.all.count + 1
       act_as_system_user do
index 4cb7a0a1b1ca1ac669bafaa7ef717a816cb493b3..b54e8d9de64f970726dc49d07ca47e368491986a 100644 (file)
@@ -34,8 +34,8 @@ class NodeTest < ActiveSupport::TestCase
   end
 
   test "dns_server_conf_template" do
-    Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
-    Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template'
     conffile = Rails.root.join 'tmp', 'compute65535.conf'
     File.unlink conffile rescue nil
     assert Node.dns_server_update 'compute65535', '127.0.0.1'
@@ -44,8 +44,8 @@ class NodeTest < ActiveSupport::TestCase
   end
 
   test "dns_server_restart_command" do
-    Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
-    Rails.configuration.dns_server_reload_command = 'foobar'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar'
     restartfile = Rails.root.join 'tmp', 'restart.txt'
     File.unlink restartfile rescue nil
     assert Node.dns_server_update 'compute65535', '127.0.0.127'
@@ -54,14 +54,14 @@ class NodeTest < ActiveSupport::TestCase
   end
 
   test "dns_server_restart_command fail" do
-    Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp', 'bogusdir'
-    Rails.configuration.dns_server_reload_command = 'foobar'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp', 'bogusdir'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar'
     refute Node.dns_server_update 'compute65535', '127.0.0.127'
   end
 
   test "dns_server_update_command with valid command" do
     testfile = Rails.root.join('tmp', 'node_test_dns_server_update_command.txt')
-    Rails.configuration.dns_server_update_command =
+    Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand =
       ('echo -n "%{hostname} == %{ip_address}" >' +
        testfile.to_s.shellescape)
     assert Node.dns_server_update 'compute65535', '127.0.0.1'
@@ -70,23 +70,23 @@ class NodeTest < ActiveSupport::TestCase
   end
 
   test "dns_server_update_command with failing command" do
-    Rails.configuration.dns_server_update_command = 'false %{hostname}'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = 'false %{hostname}'
     refute Node.dns_server_update 'compute65535', '127.0.0.1'
   end
 
   test "dns update with no commands/dirs configured" do
-    Rails.configuration.dns_server_update_command = false
-    Rails.configuration.dns_server_conf_dir = false
-    Rails.configuration.dns_server_conf_template = 'ignored!'
-    Rails.configuration.dns_server_reload_command = 'ignored!'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = ""
+    Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = ""
+    Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = 'ignored!'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'ignored!'
     assert Node.dns_server_update 'compute65535', '127.0.0.127'
   end
 
   test "don't leave temp files behind if there's an error writing them" do
-    Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+    Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template'
     Tempfile.any_instance.stubs(:puts).raises(IOError)
     Dir.mktmpdir do |tmpdir|
-      Rails.configuration.dns_server_conf_dir = tmpdir
+      Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = tmpdir
       refute Node.dns_server_update 'compute65535', '127.0.0.127'
       assert_empty Dir.entries(tmpdir).select{|f| File.file? f}
     end
@@ -100,14 +100,14 @@ class NodeTest < ActiveSupport::TestCase
   end
 
   test "ping new node with no hostname and no config" do
-    Rails.configuration.assign_node_hostname = false
+    Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false
     node = ping_node(:new_with_no_hostname, {})
     refute_nil node.slot_number
     assert_nil node.hostname
   end
 
   test "ping new node with zero padding config" do
-    Rails.configuration.assign_node_hostname = 'compute%<slot_number>04d'
+    Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%<slot_number>04d'
     node = ping_node(:new_with_no_hostname, {})
     slot_number = node.slot_number
     refute_nil slot_number
@@ -121,7 +121,7 @@ class NodeTest < ActiveSupport::TestCase
   end
 
   test "ping node with hostname and no config and expect hostname unchanged" do
-    Rails.configuration.assign_node_hostname = false
+    Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false
     node = ping_node(:new_with_custom_hostname, {})
     assert_equal(23, node.slot_number)
     assert_equal("custom1", node.hostname)
@@ -196,13 +196,13 @@ class NodeTest < ActiveSupport::TestCase
   end
 
   test 'run out of slots' do
-    Rails.configuration.max_compute_nodes = 3
+    Rails.configuration.Containers.MaxComputeVMs = 3
     act_as_system_user do
       Node.destroy_all
       (1..4).each do |i|
         n = Node.create!
         args = { ip: "10.0.0.#{i}", ping_secret: n.info['ping_secret'] }
-        if i <= Rails.configuration.max_compute_nodes
+        if i <= Rails.configuration.Containers.MaxComputeVMs
           n.ping(args)
         else
           assert_raises do
index fa4c37f74f211b6afe5d0b90804e9b2c85ce97c5..cb562ef977200740e3d116889dad9ed1b9f55cb8 100644 (file)
@@ -23,15 +23,15 @@ class RepositoryTest < ActiveSupport::TestCase
   def default_git_url(repo_name, user_name=nil)
     if user_name
       "git@git.%s.arvadosapi.com:%s/%s.git" %
-        [Rails.configuration.uuid_prefix, user_name, repo_name]
+        [Rails.configuration.ClusterID, user_name, repo_name]
     else
       "git@git.%s.arvadosapi.com:%s.git" %
-        [Rails.configuration.uuid_prefix, repo_name]
+        [Rails.configuration.ClusterID, repo_name]
     end
   end
 
   def assert_server_path(path_tail, repo_sym)
-    assert_equal(File.join(Rails.configuration.git_repositories_dir, path_tail),
+    assert_equal(File.join(Rails.configuration.Git.Repositories, path_tail),
                  repositories(repo_sym).server_path)
   end
 
index 008259c0b65041146ed4e59e29eff34876bc6204..f409d231f1587e6355a1a43d45af6c425ecee416 100644 (file)
@@ -14,12 +14,12 @@ class UserNotifierTest < ActionMailer::TestCase
     assert_not_nil email
 
     # Test the body of the sent email contains what we expect it to
-    assert_equal Rails.configuration.user_notifier_email_from, email.from.first
+    assert_equal Rails.configuration.Users.UserNotifierEmailFrom, email.from.first
     assert_equal user.email, email.to.first
     assert_equal 'Welcome to Arvados - shell account enabled', email.subject
     assert (email.body.to_s.include? 'Your Arvados shell account has been set up'),
         'Expected Your Arvados shell account has been set up in email body'
-    assert (email.body.to_s.include? Rails.configuration.workbench_address),
+    assert (email.body.to_s.include? Rails.configuration.Services.Workbench1.ExternalURL.to_s),
         'Expected workbench url in email body'
   end
 
index 67c410047cfb5e62ba65be801a46bd20b721971d..185653e873811d8b79e18de40c0f589b5763557a 100644 (file)
@@ -110,7 +110,7 @@ class UserTest < ActiveSupport::TestCase
   end
 
   test "new username set avoiding blacklist" do
-    Rails.configuration.auto_setup_name_blacklist = ["root"]
+    Rails.configuration.Users.AutoSetupUsernameBlacklist = ["root"]
     check_new_username_setting("root", "root2")
   end
 
@@ -157,8 +157,8 @@ class UserTest < ActiveSupport::TestCase
    [false, 'bar@example.com', nil, true],
    [true, 'foo@example.com', true, nil],
    [true, 'bar@example.com', true, true],
-   [false, false, nil, nil],
-   [true, false, true, nil]
+   [false, '', nil, nil],
+   [true, '', true, nil]
   ].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin|
     # In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'.
     test "auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}" do
@@ -170,8 +170,8 @@ class UserTest < ActiveSupport::TestCase
         assert_equal 0, @all_users.count, "No admin users should exist (except for the system user)"
       end
 
-      Rails.configuration.auto_admin_first_user = auto_admin_first_user_config
-      Rails.configuration.auto_admin_user = auto_admin_user_config
+      Rails.configuration.Users.AutoAdminFirstUser = auto_admin_first_user_config
+      Rails.configuration.Users.AutoAdminUserWithEmail = auto_admin_user_config
 
       # See if the foo user has is_admin
       foo = User.new
@@ -384,15 +384,15 @@ class UserTest < ActiveSupport::TestCase
     test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
       set_user_from_auth :admin
 
-      Rails.configuration.auto_setup_new_users = true
+      Rails.configuration.Users.AutoSetupNewUsers = true
 
       if auto_setup_vm
-        Rails.configuration.auto_setup_new_users_with_vm_uuid = virtual_machines(:testvm)['uuid']
+        Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = virtual_machines(:testvm)['uuid']
       else
-        Rails.configuration.auto_setup_new_users_with_vm_uuid = false
+        Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = ""
       end
 
-      Rails.configuration.auto_setup_new_users_with_repository = auto_setup_repo
+      Rails.configuration.Users.AutoSetupNewUsersWithRepository = auto_setup_repo
 
       create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, expect_username
     end
@@ -625,12 +625,12 @@ class UserTest < ActiveSupport::TestCase
   end
 
   def create_user_and_verify_setup_and_notifications (active, new_user_recipients, inactive_recipients, email, expect_username)
-    Rails.configuration.new_user_notification_recipients = new_user_recipients
-    Rails.configuration.new_inactive_user_notification_recipients = inactive_recipients
+    Rails.configuration.Users.NewUserNotificationRecipients = new_user_recipients
+    Rails.configuration.Users.NewInactiveUserNotificationRecipients = inactive_recipients
 
     ActionMailer::Base.deliveries = []
 
-    can_setup = (Rails.configuration.auto_setup_new_users and
+    can_setup = (Rails.configuration.Users.AutoSetupNewUsers and
                  (not expect_username.nil?))
     expect_repo_name = "#{expect_username}/#{expect_username}"
     prior_repo = Repository.where(name: expect_repo_name).first
@@ -643,21 +643,21 @@ class UserTest < ActiveSupport::TestCase
     assert_equal(expect_username, user.username)
 
     # check user setup
-    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+    verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active,
                        groups(:all_users).uuid, user.uuid,
                        "permission", "can_read")
     # Check for OID login link.
-    verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+    verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active,
                        user.uuid, user.email, "permission", "can_login")
     # Check for repository.
     if named_repo = (prior_repo or
                      Repository.where(name: expect_repo_name).first)
       verify_link_exists((can_setup and prior_repo.nil? and
-                          Rails.configuration.auto_setup_new_users_with_repository),
+                          Rails.configuration.Users.AutoSetupNewUsersWithRepository),
                          named_repo.uuid, user.uuid, "permission", "can_manage")
     end
     # Check for VM login.
-    if auto_vm_uuid = Rails.configuration.auto_setup_new_users_with_vm_uuid
+    if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != ""
       verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
                          "permission", "can_login", "username", expect_username)
     end
@@ -666,17 +666,17 @@ class UserTest < ActiveSupport::TestCase
     new_user_email = nil
     new_inactive_user_email = nil
 
-    new_user_email_subject = "#{Rails.configuration.email_subject_prefix}New user created notification"
-    if Rails.configuration.auto_setup_new_users
+    new_user_email_subject = "#{Rails.configuration.Users.EmailSubjectPrefix}New user created notification"
+    if Rails.configuration.Users.AutoSetupNewUsers
       new_user_email_subject = (expect_username or active) ?
-                                 "#{Rails.configuration.email_subject_prefix}New user created and setup notification" :
-                                 "#{Rails.configuration.email_subject_prefix}New user created, but not setup notification"
+                                 "#{Rails.configuration.Users.EmailSubjectPrefix}New user created and setup notification" :
+                                 "#{Rails.configuration.Users.EmailSubjectPrefix}New user created, but not setup notification"
     end
 
     ActionMailer::Base.deliveries.each do |d|
       if d.subject == new_user_email_subject then
         new_user_email = d
-      elsif d.subject == "#{Rails.configuration.email_subject_prefix}New inactive user notification" then
+      elsif d.subject == "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification" then
         new_inactive_user_email = d
       end
     end
@@ -685,7 +685,7 @@ class UserTest < ActiveSupport::TestCase
     # if the new user email recipients config parameter is set
     if not new_user_recipients.empty? then
       assert_not_nil new_user_email, 'Expected new user email after setup'
-      assert_equal Rails.configuration.user_notifier_email_from, new_user_email.from[0]
+      assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_user_email.from[0]
       assert_equal new_user_recipients, new_user_email.to[0]
       assert_equal new_user_email_subject, new_user_email.subject
     else
@@ -695,9 +695,9 @@ class UserTest < ActiveSupport::TestCase
     if not active
       if not inactive_recipients.empty? then
         assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
-        assert_equal Rails.configuration.user_notifier_email_from, new_inactive_user_email.from[0]
+        assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_inactive_user_email.from[0]
         assert_equal inactive_recipients, new_inactive_user_email.to[0]
-        assert_equal "#{Rails.configuration.email_subject_prefix}New inactive user notification", new_inactive_user_email.subject
+        assert_equal "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification", new_inactive_user_email.subject
       else
         assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
       end
index 836be2e6080c3345758a38bbd0953ace6d7a11c0..08a6c5881c51e32951d475ec49b37a833f360532 100644 (file)
@@ -8,12 +8,16 @@ import (
        "bytes"
        "crypto/md5"
        "fmt"
+       "io"
+       "io/ioutil"
        "log"
        "math"
+       "os"
        "runtime"
        "sort"
        "strings"
        "sync"
+       "syscall"
        "time"
 
        "git.curoverse.com/arvados.git/sdk/go/arvados"
@@ -31,10 +35,12 @@ import (
 // BlobSignatureTTL; and all N existing replicas of a given data block
 // are in the N best positions in rendezvous probe order.
 type Balancer struct {
-       Logger  *logrus.Logger
-       Dumper  *logrus.Logger
+       Logger  logrus.FieldLogger
+       Dumper  logrus.FieldLogger
        Metrics *metrics
 
+       LostBlocksFile string
+
        *BlockStateMap
        KeepServices       map[string]*KeepService
        DefaultReplication int
@@ -48,6 +54,7 @@ type Balancer struct {
        errors        []error
        stats         balancerStats
        mutex         sync.Mutex
+       lostBlocks    io.Writer
 }
 
 // Run performs a balance operation using the given config and
@@ -64,6 +71,30 @@ func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions R
 
        defer bal.time("sweep", "wall clock time to run one full sweep")()
 
+       var lbFile *os.File
+       if bal.LostBlocksFile != "" {
+               tmpfn := bal.LostBlocksFile + ".tmp"
+               lbFile, err = os.OpenFile(tmpfn, os.O_CREATE|os.O_WRONLY, 0777)
+               if err != nil {
+                       return
+               }
+               defer lbFile.Close()
+               err = syscall.Flock(int(lbFile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+               if err != nil {
+                       return
+               }
+               defer func() {
+                       // Remove the tempfile only if we didn't get
+                       // as far as successfully renaming it.
+                       if lbFile != nil {
+                               os.Remove(tmpfn)
+                       }
+               }()
+               bal.lostBlocks = lbFile
+       } else {
+               bal.lostBlocks = ioutil.Discard
+       }
+
        if len(config.KeepServiceList.Items) > 0 {
                err = bal.SetKeepServices(config.KeepServiceList)
        } else {
@@ -107,6 +138,17 @@ func (bal *Balancer) Run(config Config, runOptions RunOptions) (nextRunOptions R
        if err = bal.CheckSanityLate(); err != nil {
                return
        }
+       if lbFile != nil {
+               err = lbFile.Sync()
+               if err != nil {
+                       return
+               }
+               err = os.Rename(bal.LostBlocksFile+".tmp", bal.LostBlocksFile)
+               if err != nil {
+                       return
+               }
+               lbFile = nil
+       }
        if runOptions.CommitPulls {
                err = bal.CommitPulls(&config.Client)
                if err != nil {
@@ -206,6 +248,24 @@ func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
                        return fmt.Errorf("config error: %s: proxy servers cannot be balanced", srv)
                }
        }
+
+       var checkPage arvados.CollectionList
+       if err = c.RequestAndDecode(&checkPage, "GET", "arvados/v1/collections", nil, arvados.ResourceListParams{
+               Limit:              new(int),
+               Count:              "exact",
+               IncludeTrash:       true,
+               IncludeOldVersions: true,
+               Filters: []arvados.Filter{{
+                       Attr:     "modified_at",
+                       Operator: "=",
+                       Operand:  nil,
+               }},
+       }); err != nil {
+               return err
+       } else if n := checkPage.ItemsAvailable; n > 0 {
+               return fmt.Errorf("%d collections exist with null modified_at; cannot fetch reliably", n)
+       }
+
        return nil
 }
 
@@ -332,7 +392,7 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
                defer wg.Done()
                for coll := range collQ {
                        err := bal.addCollection(coll)
-                       if err != nil {
+                       if err != nil || len(errs) > 0 {
                                select {
                                case errs <- err:
                                default:
@@ -383,17 +443,20 @@ func (bal *Balancer) GetCurrentState(c *arvados.Client, pageSize, bufs int) erro
 func (bal *Balancer) addCollection(coll arvados.Collection) error {
        blkids, err := coll.SizedDigests()
        if err != nil {
-               bal.mutex.Lock()
-               bal.errors = append(bal.errors, fmt.Errorf("%v: %v", coll.UUID, err))
-               bal.mutex.Unlock()
-               return nil
+               return fmt.Errorf("%v: %v", coll.UUID, err)
        }
        repl := bal.DefaultReplication
        if coll.ReplicationDesired != nil {
                repl = *coll.ReplicationDesired
        }
        debugf("%v: %d block x%d", coll.UUID, len(blkids), repl)
-       bal.BlockStateMap.IncreaseDesired(coll.StorageClassesDesired, repl, blkids)
+       // Pass pdh to IncreaseDesired only if LostBlocksFile is being
+       // written -- otherwise it's just a waste of memory.
+       pdh := ""
+       if bal.LostBlocksFile != "" {
+               pdh = coll.PortableDataHash
+       }
+       bal.BlockStateMap.IncreaseDesired(pdh, coll.StorageClassesDesired, repl, blkids)
        return nil
 }
 
@@ -444,7 +507,7 @@ func (bal *Balancer) ComputeChangeSets() {
 
 func (bal *Balancer) setupLookupTables() {
        bal.serviceRoots = make(map[string]string)
-       bal.classes = []string{"default"}
+       bal.classes = defaultClasses
        bal.mountsByClass = map[string]map[*KeepMount]bool{"default": {}}
        bal.mounts = 0
        for _, srv := range bal.KeepServices {
@@ -732,17 +795,17 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                                From:        slot.mnt,
                        })
                        change = changeTrash
-               case len(blk.Replicas) == 0:
-                       change = changeNone
-               case slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
+               case len(blk.Replicas) > 0 && slot.repl == nil && slot.want && !slot.mnt.ReadOnly:
                        slot.mnt.KeepService.AddPull(Pull{
                                SizedDigest: blkid,
                                From:        blk.Replicas[0].KeepMount.KeepService,
                                To:          slot.mnt,
                        })
                        change = changePull
-               default:
+               case slot.repl != nil:
                        change = changeStay
+               default:
+                       change = changeNone
                }
                if bal.Dumper != nil {
                        var mtime int64
@@ -754,7 +817,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
                }
        }
        if bal.Dumper != nil {
-               bal.Dumper.Printf("%s have=%d want=%v %s", blkid, have, want, strings.Join(changes, " "))
+               bal.Dumper.Printf("%s refs=%d have=%d want=%v %v %v", blkid, blk.RefCount, have, want, blk.Desired, changes)
        }
        return balanceResult{
                blk:        blk,
@@ -867,6 +930,11 @@ func (bal *Balancer) collectStatistics(results <-chan balanceResult) {
                        s.lost.replicas -= surplus
                        s.lost.blocks++
                        s.lost.bytes += bytes * int64(-surplus)
+                       fmt.Fprintf(bal.lostBlocks, "%s", strings.SplitN(string(result.blkid), "+", 2)[0])
+                       for pdh := range result.blk.Refs {
+                               fmt.Fprintf(bal.lostBlocks, " %s", pdh)
+                       }
+                       fmt.Fprint(bal.lostBlocks, "\n")
                case surplus < 0:
                        s.underrep.replicas -= surplus
                        s.underrep.blocks++
index 7e2adcfedd4dc33b67ccae5301813cb531f6af9d..db530bc4926de88502132730f35c816ec3cf92b6 100644 (file)
@@ -11,6 +11,7 @@ import (
        "io/ioutil"
        "net/http"
        "net/http/httptest"
+       "os"
        "strings"
        "sync"
        "time"
@@ -203,6 +204,8 @@ func (s *stubServer) serveCollectionsButSkipOne() *reqTracker {
                        io.WriteString(w, `{"items_available":0,"items":[]}`)
                } else if strings.Contains(r.Form.Get("filters"), `"modified_at","="`) && strings.Contains(r.Form.Get("filters"), `"uuid","\u003e"`) {
                        io.WriteString(w, `{"items_available":0,"items":[]}`)
+               } else if strings.Contains(r.Form.Get("filters"), `"modified_at","=",null`) {
+                       io.WriteString(w, `{"items_available":0,"items":[]}`)
                } else {
                        io.WriteString(w, `{"items_available":2,"items":[
                                {"uuid":"zzzzz-4zz18-ehbhgtheo8909or","portable_data_hash":"fa7aeb5140e2848d39b416daeef4ffc5+45","manifest_text":". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n","modified_at":"2014-02-03T17:22:54Z"},
@@ -268,6 +271,28 @@ func (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {
        return rt
 }
 
+func (s *stubServer) serveKeepstoreIndexFoo1() *reqTracker {
+       rt := &reqTracker{}
+       s.mux.HandleFunc("/index/", func(w http.ResponseWriter, r *http.Request) {
+               rt.Add(r)
+               io.WriteString(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n\n")
+       })
+       for _, mounts := range stubMounts {
+               for i, mnt := range mounts {
+                       i := i
+                       s.mux.HandleFunc(fmt.Sprintf("/mounts/%s/blocks", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {
+                               rt.Add(r)
+                               if i == 0 {
+                                       io.WriteString(w, "acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\n\n")
+                               } else {
+                                       io.WriteString(w, "\n")
+                               }
+                       })
+               }
+       }
+       return rt
+}
+
 func (s *stubServer) serveKeepstoreTrash() *reqTracker {
        return s.serveStatic("/trash", `{}`)
 }
@@ -404,6 +429,32 @@ func (s *runSuite) TestDetectSkippedCollections(c *check.C) {
        c.Check(pullReqs.Count(), check.Equals, 0)
 }
 
+func (s *runSuite) TestWriteLostBlocks(c *check.C) {
+       lostf, err := ioutil.TempFile("", "keep-balance-lost-blocks-test-")
+       c.Assert(err, check.IsNil)
+       s.config.LostBlocksFile = lostf.Name()
+       defer os.Remove(lostf.Name())
+       opts := RunOptions{
+               CommitPulls: true,
+               CommitTrash: true,
+               Logger:      s.logger(c),
+       }
+       s.stub.serveCurrentUserAdmin()
+       s.stub.serveFooBarFileCollections()
+       s.stub.serveKeepServices(stubServices)
+       s.stub.serveKeepstoreMounts()
+       s.stub.serveKeepstoreIndexFoo1()
+       s.stub.serveKeepstoreTrash()
+       s.stub.serveKeepstorePull()
+       srv, err := NewServer(s.config, opts)
+       c.Assert(err, check.IsNil)
+       _, err = srv.Run()
+       c.Check(err, check.IsNil)
+       lost, err := ioutil.ReadFile(lostf.Name())
+       c.Assert(err, check.IsNil)
+       c.Check(string(lost), check.Equals, "37b51d194a7513e45b56f6524f2d51f2 fa7aeb5140e2848d39b416daeef4ffc5+45\n")
+}
+
 func (s *runSuite) TestDryRun(c *check.C) {
        opts := RunOptions{
                CommitPulls: false,
@@ -433,6 +484,11 @@ func (s *runSuite) TestDryRun(c *check.C) {
 }
 
 func (s *runSuite) TestCommit(c *check.C) {
+       lostf, err := ioutil.TempFile("", "keep-balance-lost-blocks-test-")
+       c.Assert(err, check.IsNil)
+       s.config.LostBlocksFile = lostf.Name()
+       defer os.Remove(lostf.Name())
+
        s.config.Listen = ":"
        s.config.ManagementToken = "xyzzy"
        opts := RunOptions{
@@ -460,6 +516,10 @@ func (s *runSuite) TestCommit(c *check.C) {
        // in a poor rendezvous position
        c.Check(bal.stats.pulls, check.Equals, 2)
 
+       lost, err := ioutil.ReadFile(lostf.Name())
+       c.Assert(err, check.IsNil)
+       c.Check(string(lost), check.Equals, "")
+
        metrics := s.getMetrics(c, srv)
        c.Check(metrics, check.Matches, `(?ms).*\narvados_keep_total_bytes 15\n.*`)
        c.Check(metrics, check.Matches, `(?ms).*\narvados_keepbalance_changeset_compute_seconds_sum [0-9\.]+\n.*`)
index 37be185dcc1af9fbc4ebf9f83cd8302ca15b8e6d..423546c46a9c179aab3b15522912667c72cdbb8f 100644 (file)
@@ -132,6 +132,75 @@ func (bal *balancerSuite) TestSkipReadonly(c *check.C) {
                shouldPull: slots{2, 4}})
 }
 
+func (bal *balancerSuite) TestMultipleViewsReadOnly(c *check.C) {
+       bal.testMultipleViews(c, true)
+}
+
+func (bal *balancerSuite) TestMultipleViews(c *check.C) {
+       bal.testMultipleViews(c, false)
+}
+
+func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
+       for i, srv := range bal.srvs {
+               // Add a mount to each service
+               srv.mounts[0].KeepMount.DeviceID = fmt.Sprintf("writable-by-srv-%x", i)
+               srv.mounts = append(srv.mounts, &KeepMount{
+                       KeepMount: arvados.KeepMount{
+                               DeviceID:    fmt.Sprintf("writable-by-srv-%x", (i+1)%len(bal.srvs)),
+                               UUID:        fmt.Sprintf("zzzzz-mount-%015x", i<<16),
+                               ReadOnly:    readonly,
+                               Replication: 1,
+                       },
+                       KeepService: srv,
+               })
+       }
+       for i := 1; i < len(bal.srvs); i++ {
+               c.Logf("i=%d", i)
+               if i == 4 {
+                       // Timestamps are all different, but one of
+                       // the mounts on srv[4] has the same device ID
+                       // where the non-deletable replica is stored
+                       // on srv[3], so only one replica is safe to
+                       // trash.
+                       bal.try(c, tester{
+                               desired:     map[string]int{"default": 1},
+                               current:     slots{0, i, i},
+                               shouldTrash: slots{i}})
+               } else if readonly {
+                       // Timestamps are all different, and the third
+                       // replica can't be trashed because it's on a
+                       // read-only mount, so the first two replicas
+                       // should be trashed.
+                       bal.try(c, tester{
+                               desired:     map[string]int{"default": 1},
+                               current:     slots{0, i, i},
+                               shouldTrash: slots{0, i}})
+               } else {
+                       // Timestamps are all different, so both
+                       // replicas on the non-optimal server should
+                       // be trashed.
+                       bal.try(c, tester{
+                               desired:     map[string]int{"default": 1},
+                               current:     slots{0, i, i},
+                               shouldTrash: slots{i, i}})
+               }
+               // If the three replicas have identical timestamps,
+               // none of them can be trashed safely.
+               bal.try(c, tester{
+                       desired:    map[string]int{"default": 1},
+                       current:    slots{0, i, i},
+                       timestamps: []int64{12345678, 12345678, 12345678}})
+               // If the first and third replicas have identical
+               // timestamps, only the second replica should be
+               // trashed.
+               bal.try(c, tester{
+                       desired:     map[string]int{"default": 1},
+                       current:     slots{0, i, i},
+                       timestamps:  []int64{12345678, 12345679, 12345678},
+                       shouldTrash: slots{i}})
+       }
+}
+
 func (bal *balancerSuite) TestFixUnbalanced(c *check.C) {
        bal.try(c, tester{
                desired:    map[string]int{"default": 2},
@@ -162,9 +231,10 @@ func (bal *balancerSuite) TestFixUnbalanced(c *check.C) {
 }
 
 func (bal *balancerSuite) TestMultipleReplicasPerService(c *check.C) {
-       for _, srv := range bal.srvs {
+       for s, srv := range bal.srvs {
                for i := 0; i < 3; i++ {
                        m := *(srv.mounts[0])
+                       m.UUID = fmt.Sprintf("zzzzz-mount-%015x", (s<<10)+i)
                        srv.mounts = append(srv.mounts, &m)
                }
        }
index 46e69059c9c796c5b23318f8f9b78b4f3c83651e..d9338d0f9b9ef7ce9f4970fb131445c4ce508415 100644 (file)
@@ -23,6 +23,7 @@ type Replica struct {
 // replicas actually stored (according to the keepstore indexes we
 // know about).
 type BlockState struct {
+       Refs     map[string]bool // pdh => true (only tracked when len(Replicas)==0)
        RefCount int
        Replicas []Replica
        Desired  map[string]int
@@ -40,9 +41,21 @@ var defaultClasses = []string{"default"}
 
 func (bs *BlockState) addReplica(r Replica) {
        bs.Replicas = append(bs.Replicas, r)
+       // Free up memory wasted by tracking PDHs that will never be
+       // reported (see comment in increaseDesired)
+       bs.Refs = nil
 }
 
-func (bs *BlockState) increaseDesired(classes []string, n int) {
+func (bs *BlockState) increaseDesired(pdh string, classes []string, n int) {
+       if pdh != "" && len(bs.Replicas) == 0 {
+               // Note we only track PDHs if there's a possibility
+               // that we will report the list of referring PDHs,
+               // i.e., if we haven't yet seen a replica.
+               if bs.Refs == nil {
+                       bs.Refs = map[string]bool{}
+               }
+               bs.Refs[pdh] = true
+       }
        bs.RefCount++
        if len(classes) == 0 {
                classes = defaultClasses
@@ -109,11 +122,14 @@ func (bsm *BlockStateMap) AddReplicas(mnt *KeepMount, idx []arvados.KeepServiceI
 
 // IncreaseDesired updates the map to indicate the desired replication
 // for the given blocks in the given storage class is at least n.
-func (bsm *BlockStateMap) IncreaseDesired(classes []string, n int, blocks []arvados.SizedDigest) {
+//
+// If pdh is non-empty, it will be tracked and reported in the "lost
+// blocks" report.
+func (bsm *BlockStateMap) IncreaseDesired(pdh string, classes []string, n int, blocks []arvados.SizedDigest) {
        bsm.mutex.Lock()
        defer bsm.mutex.Unlock()
 
        for _, blkid := range blocks {
-               bsm.get(blkid).increaseDesired(classes, n)
+               bsm.get(blkid).increaseDesired(pdh, classes, n)
        }
 }
index 1e5fa5797855048bcb9db80487d7f6a8e4486787..534928bc82340bb23574f71ceac85c7872d8deb0 100644 (file)
@@ -43,6 +43,18 @@ func EachCollection(c *arvados.Client, pageSize int, f func(arvados.Collection)
                return err
        }
 
+       // Note the obvious way to get all collections (sorting by
+       // UUID) would be much easier, but would lose data: If a
+       // client were to move files from collection with uuid="zzz"
+       // to a collection with uuid="aaa" around the time when we
+       // were fetching the "mmm" page, we would never see those
+       // files' block IDs at all -- even if the client is careful to
+       // save "aaa" before saving "zzz".
+       //
+       // Instead, we get pages in modified_at order. Collections
+       // that are modified during the run will be re-fetched in a
+       // subsequent page.
+
        limit := pageSize
        if limit <= 0 {
                // Use the maximum page size the server allows
index 8f5d08a192ff81cb07bd88a90cb29d9a74f73c76..a79779c7dc8f9fdb5eb7316a74c28fb614d9da52 100644 (file)
@@ -71,11 +71,11 @@ func (s *integrationSuite) SetUpTest(c *check.C) {
 }
 
 func (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {
-       var logBuf *bytes.Buffer
+       var logBuf bytes.Buffer
        for iter := 0; iter < 20; iter++ {
-               logBuf := &bytes.Buffer{}
+               logBuf.Reset()
                logger := logrus.New()
-               logger.Out = logBuf
+               logger.Out = &logBuf
                opts := RunOptions{
                        CommitPulls: true,
                        CommitTrash: true,
index 3316a17240cddd69c37aa804e9bd5e2dbf610def..84516a821060da1b795da1b40655a9a62157fd52 100644 (file)
@@ -76,9 +76,10 @@ func main() {
                }
        }
        if *dumpFlag {
-               runOptions.Dumper = logrus.New()
-               runOptions.Dumper.Out = os.Stdout
-               runOptions.Dumper.Formatter = &logrus.TextFormatter{}
+               dumper := logrus.New()
+               dumper.Out = os.Stdout
+               dumper.Formatter = &logrus.TextFormatter{}
+               runOptions.Dumper = dumper
        }
        srv, err := NewServer(cfg, runOptions)
        if err != nil {
index 613a2f7d3c3da2e6c466ec8ac9345a9e2e70775e..894056c9f27756c9f452f904568d53f88f433c74 100644 (file)
@@ -57,6 +57,10 @@ type Config struct {
 
        // Timeout for outgoing http request/response cycle.
        RequestTimeout arvados.Duration
+
+       // Destination filename for the list of lost block hashes, one
+       // per line. Updated atomically during each successful run.
+       LostBlocksFile string
 }
 
 // RunOptions controls runtime behavior. The flags/options that belong
@@ -70,8 +74,8 @@ type RunOptions struct {
        Once        bool
        CommitPulls bool
        CommitTrash bool
-       Logger      *logrus.Logger
-       Dumper      *logrus.Logger
+       Logger      logrus.FieldLogger
+       Dumper      logrus.FieldLogger
 
        // SafeRendezvousState from the most recent balance operation,
        // or "" if unknown. If this changes from one run to the next,
@@ -86,8 +90,8 @@ type Server struct {
        metrics    *metrics
        listening  string // for tests
 
-       Logger *logrus.Logger
-       Dumper *logrus.Logger
+       Logger logrus.FieldLogger
+       Dumper logrus.FieldLogger
 }
 
 // NewServer returns a new Server that runs Balancers using the given
@@ -142,9 +146,10 @@ func (srv *Server) start() error {
 
 func (srv *Server) Run() (*Balancer, error) {
        bal := &Balancer{
-               Logger:  srv.Logger,
-               Dumper:  srv.Dumper,
-               Metrics: srv.metrics,
+               Logger:         srv.Logger,
+               Dumper:         srv.Dumper,
+               Metrics:        srv.metrics,
+               LostBlocksFile: srv.config.LostBlocksFile,
        }
        var err error
        srv.runOptions, err = bal.Run(srv.config, srv.runOptions)
index 6b5b233c2a6701912ce06b1356fdb864778d0cf8..3c17b3bd0641e2bee23007d775b1740e2c7a14d4 100644 (file)
@@ -26,7 +26,11 @@ import (
        "github.com/prometheus/client_golang/prometheus"
 )
 
-const azureDefaultRequestTimeout = arvados.Duration(10 * time.Minute)
+const (
+       azureDefaultRequestTimeout       = arvados.Duration(10 * time.Minute)
+       azureDefaultListBlobsMaxAttempts = 12
+       azureDefaultListBlobsRetryDelay  = arvados.Duration(10 * time.Second)
+)
 
 var (
        azureMaxGetBytes           int
@@ -108,6 +112,8 @@ type AzureBlobVolume struct {
        ReadOnly              bool
        RequestTimeout        arvados.Duration
        StorageClasses        []string
+       ListBlobsRetryDelay   arvados.Duration
+       ListBlobsMaxAttempts  int
 
        azClient  storage.Client
        container *azureContainer
@@ -149,6 +155,12 @@ func (v *AzureBlobVolume) Type() string {
 
 // Start implements Volume.
 func (v *AzureBlobVolume) Start(vm *volumeMetricsVecs) error {
+       if v.ListBlobsRetryDelay == 0 {
+               v.ListBlobsRetryDelay = azureDefaultListBlobsRetryDelay
+       }
+       if v.ListBlobsMaxAttempts == 0 {
+               v.ListBlobsMaxAttempts = azureDefaultListBlobsMaxAttempts
+       }
        if v.ContainerName == "" {
                return errors.New("no container name given")
        }
@@ -486,8 +498,8 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
                Prefix:  prefix,
                Include: &storage.IncludeBlobDataset{Metadata: true},
        }
-       for {
-               resp, err := v.container.ListBlobs(params)
+       for page := 1; ; page++ {
+               resp, err := v.listBlobs(page, params)
                if err != nil {
                        return err
                }
@@ -517,6 +529,22 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
        }
 }
 
+// call v.container.ListBlobs, retrying if needed.
+func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters) (resp storage.BlobListResponse, err error) {
+       for i := 0; i < v.ListBlobsMaxAttempts; i++ {
+               resp, err = v.container.ListBlobs(params)
+               err = v.translateError(err)
+               if err == VolumeBusyError {
+                       log.Printf("ListBlobs: will retry page %d in %s after error: %s", page, v.ListBlobsRetryDelay, err)
+                       time.Sleep(time.Duration(v.ListBlobsRetryDelay))
+                       continue
+               } else {
+                       break
+               }
+       }
+       return
+}
+
 // Trash a Keep block.
 func (v *AzureBlobVolume) Trash(loc string) error {
        if v.ReadOnly {
@@ -674,8 +702,8 @@ func (v *AzureBlobVolume) EmptyTrash() {
        }
 
        params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
-       for {
-               resp, err := v.container.ListBlobs(params)
+       for page := 1; ; page++ {
+               resp, err := v.listBlobs(page, params)
                if err != nil {
                        log.Printf("EmptyTrash: ListBlobs: %v", err)
                        break
index cfad7577c59d850d25e9f2281a4ad374a60295af..8d02def1445c3f0d7f6ed5806c4c226b75e41644 100644 (file)
@@ -27,6 +27,7 @@ import (
        "testing"
        "time"
 
+       "git.curoverse.com/arvados.git/sdk/go/arvados"
        "github.com/Azure/azure-sdk-for-go/storage"
        "github.com/ghodss/yaml"
        "github.com/prometheus/client_golang/prometheus"
@@ -65,8 +66,9 @@ type azBlob struct {
 
 type azStubHandler struct {
        sync.Mutex
-       blobs map[string]*azBlob
-       race  chan chan struct{}
+       blobs      map[string]*azBlob
+       race       chan chan struct{}
+       didlist503 bool
 }
 
 func newAzStubHandler() *azStubHandler {
@@ -281,6 +283,11 @@ func (h *azStubHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
                rw.WriteHeader(http.StatusAccepted)
        case r.Method == "GET" && r.Form.Get("comp") == "list" && r.Form.Get("restype") == "container":
                // "List Blobs" API
+               if !h.didlist503 {
+                       h.didlist503 = true
+                       rw.WriteHeader(http.StatusServiceUnavailable)
+                       return
+               }
                prefix := container + "|" + r.Form.Get("prefix")
                marker := r.Form.Get("marker")
 
@@ -388,14 +395,17 @@ func NewTestableAzureBlobVolume(t TB, readonly bool, replication int) *TestableA
                        t.Fatal(err)
                }
        }
+       azClient.Sender = &singleSender{}
 
        bs := azClient.GetBlobService()
        v := &AzureBlobVolume{
-               ContainerName:    container,
-               ReadOnly:         readonly,
-               AzureReplication: replication,
-               azClient:         azClient,
-               container:        &azureContainer{ctr: bs.GetContainerReference(container)},
+               ContainerName:        container,
+               ReadOnly:             readonly,
+               AzureReplication:     replication,
+               ListBlobsMaxAttempts: 2,
+               ListBlobsRetryDelay:  arvados.Duration(time.Millisecond),
+               azClient:             azClient,
+               container:            &azureContainer{ctr: bs.GetContainerReference(container)},
        }
 
        return &TestableAzureBlobVolume{
index 51dd73a513c1d4c729a6743aaabe0cefa1202c4b..9a4d02df850fab836cdafaa4e21abb070b492782 100644 (file)
@@ -277,13 +277,19 @@ func (rtr *router) IndexHandler(resp http.ResponseWriter, req *http.Request) {
 
        for _, v := range vols {
                if err := v.IndexTo(prefix, resp); err != nil {
-                       // The only errors returned by IndexTo are
-                       // write errors returned by resp.Write(),
-                       // which probably means the client has
-                       // disconnected and this error will never be
-                       // reported to the client -- but it will
-                       // appear in our own error log.
-                       http.Error(resp, err.Error(), http.StatusInternalServerError)
+                       // We can't send an error message to the
+                       // client because we might have already sent
+                       // headers and index content. All we can do is
+                       // log the error in our own logs, and (in
+                       // cases where headers haven't been sent yet)
+                       // set a 500 status.
+                       //
+                       // If headers have already been sent, the
+                       // client must notice the lack of trailing
+                       // newline as an indication that the response
+                       // is incomplete.
+                       log.Printf("index error from volume %s: %s", v, err)
+                       http.Error(resp, "", http.StatusInternalServerError)
                        return
                }
        }
index 74933718c76ac8e0e499f62bf3ede740308ce073..878119634bbaf23fca3183ab37651e3274147e3e 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
 # SPDX-License-Identifier: AGPL-3.0
@@ -64,7 +64,7 @@ GOSTUFF="$ARVBOX_DATA/gopath"
 RLIBS="$ARVBOX_DATA/Rlibs"
 
 getip() {
-    docker inspect $ARVBOX_CONTAINER | grep \"IPAddress\" | head -n1 | tr -d ' ":,\n' | cut -c10-
+    docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $ARVBOX_CONTAINER
 }
 
 gethost() {
@@ -103,8 +103,11 @@ wait_for_arvbox() {
     docker logs -f $ARVBOX_CONTAINER > $FF &
     LOGPID=$!
     while read line ; do
-        if echo $line | grep "ok: down: ready:" >/dev/null ; then
+        if [[ $line =~ "ok: down: ready:" ]] ; then
             kill $LOGPID
+           set +e
+           wait $LOGPID 2>/dev/null
+           set -e
        else
            echo $line
         fi
@@ -132,9 +135,14 @@ docker_run_dev() {
            "--volume=$NPMCACHE:/var/lib/npm:rw" \
            "--volume=$GOSTUFF:/var/lib/gopath:rw" \
            "--volume=$RLIBS:/var/lib/Rlibs:rw" \
+          --label "org.arvados.arvbox_config=$CONFIG" \
           "$@"
 }
 
+running_config() {
+    docker inspect $ARVBOX_CONTAINER -f '{{index .Config.Labels "org.arvados.arvbox_config"}}'
+}
+
 run() {
     CONFIG=$1
     TAG=$2
@@ -144,18 +152,22 @@ run() {
     need_setup=1
 
     if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then
-        if test "$CONFIG" = test ; then
+       if [[ $(running_config) != "$CONFIG" ]] ; then
+           echo "Container $ARVBOX_CONTAINER is '$(running_config)' config but requested '$CONFIG'; use restart or reboot"
+           return 1
+       fi
+        if test "$CONFIG" = test -o "$CONFIG" = devenv ; then
             need_setup=0
         else
             echo "Container $ARVBOX_CONTAINER is already running"
-            exit 0
+            return 0
         fi
     fi
 
     if test $need_setup = 1 ; then
         if docker ps -a | grep -E "$ARVBOX_CONTAINER$" -q ; then
             echo "Container $ARVBOX_CONTAINER already exists but is not running; use restart or reboot"
-            exit 1
+            return 1
         fi
     fi
 
@@ -165,11 +177,14 @@ run() {
            TAG=":$TAG"
             shift
         else
+           if [[ $TAG = '-' ]] ; then
+               shift
+           fi
             unset TAG
         fi
     fi
 
-    if echo "$CONFIG" | grep '^public' ; then
+    if [[ "$CONFIG" =~ ^public ]] ; then
         if test -n "$ARVBOX_PUBLISH_IP" ; then
             localip=$ARVBOX_PUBLISH_IP
         else
@@ -195,10 +210,10 @@ run() {
         PUBLIC=""
     fi
 
-    if echo "$CONFIG" | grep 'demo$' ; then
+    if [[ "$CONFIG" =~ demo$ ]] ; then
         if test -d "$ARVBOX_DATA" ; then
             echo "It looks like you already have a development container named $ARVBOX_CONTAINER."
-            echo "Set ARVBOX_CONTAINER to set a different name for your demo container"
+            echo "Set environment variable ARVBOX_CONTAINER to set a different name for your demo container"
             exit 1
         fi
 
@@ -211,6 +226,7 @@ run() {
                --name=$ARVBOX_CONTAINER \
                --privileged \
                --volumes-from $ARVBOX_CONTAINER-data \
+              --label "org.arvados.arvbox_config=$CONFIG" \
                $PUBLIC \
                arvados/arvbox-demo$TAG
         updateconf
@@ -218,7 +234,6 @@ run() {
     else
         mkdir -p "$PG_DATA" "$VAR_DATA" "$PASSENGER" "$GEMS" "$PIPCACHE" "$NPMCACHE" "$GOSTUFF" "$RLIBS"
 
-
         if ! test -d "$ARVADOS_ROOT" ; then
             git clone https://github.com/curoverse/arvados.git "$ARVADOS_ROOT"
         fi
@@ -232,7 +247,7 @@ run() {
             git clone https://github.com/curoverse/arvados-workbench2.git "$WORKBENCH2_ROOT"
         fi
 
-        if test "$CONFIG" = test ; then
+        if [[ "$CONFIG" = test ]] ; then
 
             mkdir -p $VAR_DATA/test
 
@@ -261,14 +276,36 @@ run() {
             fi
 
             docker exec -ti \
+                   -e LINES=$(tput lines) \
+                   -e COLUMNS=$(tput cols) \
+                   -e TERM=$TERM \
+                   -e WORKSPACE=/usr/src/arvados \
+                   -e GEM_HOME=/var/lib/gems \
                    $ARVBOX_CONTAINER \
                    /usr/local/lib/arvbox/runsu.sh \
                    /usr/src/arvados/build/run-tests.sh \
                    --temp /var/lib/arvados/test \
-                   WORKSPACE=/usr/src/arvados \
-                   GEM_HOME=/var/lib/gems \
                    "$@"
-        elif echo "$CONFIG" | grep 'dev$' ; then
+        elif [[ "$CONFIG" = devenv ]] ; then
+           if [[ $need_setup = 1 ]] ; then
+               docker_run_dev \
+                    --detach \
+                   --name=${ARVBOX_CONTAINER} \
+                   "--env=SVDIR=/etc/devenv-service" \
+                   "--volume=$HOME:$HOME:rw" \
+                   --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \
+                   arvados/arvbox-dev$TAG
+           fi
+           exec docker exec --interactive --tty \
+                -e LINES=$(tput lines) \
+                -e COLUMNS=$(tput cols) \
+                -e TERM=$TERM \
+                -e "ARVBOX_HOME=$HOME" \
+                -e "DISPLAY=$DISPLAY" \
+                --workdir=$PWD \
+                ${ARVBOX_CONTAINER} \
+                /usr/local/lib/arvbox/devenv.sh "$@"
+        elif [[ "$CONFIG" =~ dev$ ]] ; then
             docker_run_dev \
                    --detach \
                    --name=$ARVBOX_CONTAINER \
@@ -344,11 +381,11 @@ build() {
 
 check() {
     case "$1" in
-        localdemo|publicdemo|dev|publicdev|test)
+        localdemo|publicdemo|dev|publicdev|test|devenv)
             true
             ;;
         *)
-            echo "Argument to $subcmd must be one of localdemo, publicdemo, dev, publicdev, test"
+            echo "Argument to $subcmd must be one of localdemo, publicdemo, dev, publicdev, test, devenv"
             exit 1
         ;;
     esac
@@ -375,7 +412,7 @@ case "$subcmd" in
         ;;
 
     sh*)
-        exec docker exec -ti \
+        exec docker exec --interactive --tty \
               -e LINES=$(tput lines) \
               -e COLUMNS=$(tput cols) \
               -e TERM=$TERM \
@@ -383,6 +420,17 @@ case "$subcmd" in
               $ARVBOX_CONTAINER /bin/bash
         ;;
 
+    ash*)
+        exec docker exec --interactive --tty \
+              -e LINES=$(tput lines) \
+              -e COLUMNS=$(tput cols) \
+              -e TERM=$TERM \
+              -e GEM_HOME=/var/lib/gems \
+              -u arvbox \
+              -w /usr/src/arvados \
+              $ARVBOX_CONTAINER /bin/bash --login
+        ;;
+
     pipe)
         exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=/var/lib/gems /bin/bash -
         ;;
@@ -524,63 +572,36 @@ case "$subcmd" in
        echo "Certificate copied to $CERT"
        ;;
 
-    devenv)
-       set -x
-       if docker ps -a --filter "status=exited" | grep -E "${ARVBOX_CONTAINER}-devenv$" -q ; then
-           docker start ${ARVBOX_CONTAINER}-devenv
-       elif ! (docker ps -a --filter "status=running" | grep -E "${ARVBOX_CONTAINER}-devenv$" -q) ; then
-           docker_run_dev \
-                 --detach \
-                --name=${ARVBOX_CONTAINER}-devenv \
-                "--env=SVDIR=/etc/devenv-service" \
-                "--volume=$HOME:$HOME:rw" \
-                --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \
-                arvados/arvbox-dev$TAG
-       fi
-
-       exec docker exec --interactive --tty \
-            -e LINES=$(tput lines) \
-            -e COLUMNS=$(tput cols) \
-            -e TERM=$TERM \
-            -e "ARVBOX_HOME=$HOME" \
-            -e "DISPLAY=$DISPLAY" \
-            --workdir=$PWD \
-            ${ARVBOX_CONTAINER}-devenv \
-            /usr/local/lib/arvbox/devenv.sh "$@"
-       ;;
-
-    devenv-stop)
-       docker stop ${ARVBOX_CONTAINER}-devenv
-       ;;
-
-    devenv-reset)
-       docker stop ${ARVBOX_CONTAINER}-devenv
-       docker rm ${ARVBOX_CONTAINER}-devenv
+    psql)
+       exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec psql --dbname=arvados_development --host=localhost --username=arvados'
        ;;
 
     *)
-        echo "Arvados-in-a-box                      http://arvados.org"
+        echo "Arvados-in-a-box             https://doc.arvados.org/install/arvbox.html"
         echo
-        echo "start|run <config> [tag]  start $ARVBOX_CONTAINER container"
-        echo "stop       stop arvbox container"
-        echo "restart <config>  stop, then run again"
-        echo "status     print some information about current arvbox"
-        echo "ip         print arvbox docker container ip address"
-        echo "host       print arvbox published host"
-        echo "shell      enter arvbox shell"
-        echo "open       open arvbox workbench in a web browser"
-        echo "root-cert  get copy of root certificate"
-        echo "update  <config> stop, pull latest image, run"
-        echo "build   <config> build arvbox Docker image"
-        echo "reboot  <config> stop, build arvbox Docker image, run"
-        echo "rebuild <config> build arvbox Docker image, no layer cache"
-        echo "reset      delete arvbox arvados data (be careful!)"
-        echo "destroy    delete all arvbox code and data (be careful!)"
-        echo "log <service> tail log of specified service"
-        echo "ls <options>  list directories inside arvbox"
-        echo "cat <files>   get contents of files inside arvbox"
-        echo "pipe       run a bash script piped in from stdin"
-        echo "sv <start|stop|restart> <service> change state of service inside arvbox"
-        echo "clone <from> <to>   clone an arvbox"
+        echo "start|run <config> [tag]   start $ARVBOX_CONTAINER container"
+        echo "stop               stop arvbox container"
+        echo "restart <config>   stop, then run again"
+        echo "status             print some information about current arvbox"
+        echo "ip                 print arvbox docker container ip address"
+        echo "host               print arvbox published host"
+        echo "shell              enter shell as root"
+        echo "ashell             enter shell as 'arvbox'"
+        echo "psql               enter postgres console"
+        echo "open               open arvbox workbench in a web browser"
+        echo "root-cert          get copy of root certificate"
+        echo "update  <config>   stop, pull latest image, run"
+        echo "build   <config>   build arvbox Docker image"
+        echo "reboot  <config>   stop, build arvbox Docker image, run"
+        echo "rebuild <config>   build arvbox Docker image, no layer cache"
+        echo "reset              delete arvbox arvados data (be careful!)"
+        echo "destroy            delete all arvbox code and data (be careful!)"
+        echo "log <service>      tail log of specified service"
+        echo "ls <options>       list directories inside arvbox"
+        echo "cat <files>        get contents of files inside arvbox"
+        echo "pipe               run a bash script piped in from stdin"
+        echo "sv <start|stop|restart> <service> "
+       echo "                   change state of service inside arvbox"
+        echo "clone <from> <to>  clone dev arvbox"
         ;;
 esac
index 741bd33c4998cab201e6e9e60f0c58a69a3414fd..65171de3d25e894c6fbfc86b862959b1cc22a606 100644 (file)
@@ -85,6 +85,14 @@ ENV NODEVERSION v8.15.1
 RUN curl -L -f https://nodejs.org/dist/${NODEVERSION}/node-${NODEVERSION}-linux-x64.tar.xz | tar -C /usr/local -xJf - && \
     ln -s ../node-${NODEVERSION}-linux-x64/bin/node ../node-${NODEVERSION}-linux-x64/bin/npm /usr/local/bin
 
+ENV GRADLEVERSION 5.3.1
+
+RUN cd /tmp && \
+    curl -L -O https://services.gradle.org/distributions/gradle-${GRADLEVERSION}-bin.zip && \
+    unzip gradle-${GRADLEVERSION}-bin.zip -d /usr/local && \
+    ln -s ../gradle-${GRADLEVERSION}/bin/gradle /usr/local/bin && \
+    rm gradle-${GRADLEVERSION}-bin.zip
+
 # Set UTF-8 locale
 RUN echo en_US.UTF-8 UTF-8 > /etc/locale.gen && locale-gen
 ENV LANG en_US.UTF-8
index e9721fd55d87c1e5a597f3a56b632310321bb8d1..c6270457d5af8f1e9b8bab9ac2de004014744d55 100755 (executable)
@@ -28,10 +28,12 @@ if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then
     useradd --home-dir /var/lib/arvados/git --uid $HOSTUID --gid $HOSTGID --non-unique git
     useradd --groups docker crunch
 
-    chown arvbox:arvbox -R /usr/local /var/lib/arvados /var/lib/gems \
-          /var/lib/passenger /var/lib/postgresql \
-          /var/lib/nginx /var/log/nginx /etc/ssl/private \
-          /var/lib/gopath /var/lib/pip /var/lib/npm
+    if [[ "$1" != --no-chown ]] ; then
+       chown arvbox:arvbox -R /usr/local /var/lib/arvados /var/lib/gems \
+              /var/lib/passenger /var/lib/postgresql \
+              /var/lib/nginx /var/log/nginx /etc/ssl/private \
+              /var/lib/gopath /var/lib/pip /var/lib/npm
+    fi
 
     mkdir -p /var/lib/gems/ruby
     chown arvbox:arvbox -R /var/lib/gems/ruby
index 9ab3ac4c38cfd530e3a050c2040b73744c95f915..4df5463f1f06101b5dd82ac61281df805fa15721 100755 (executable)
@@ -3,7 +3,7 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh
+flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh --no-chown
 
 if [[ -n "$*" ]] ; then
     exec su --preserve-environment arvbox -c "$*"
index bf905a394606a4e9a1465979a575bf07e85e0657..884f16b4a7db36f64d95bc256938352837f91412 100644 (file)
@@ -167,9 +167,9 @@ class Summarizer(object):
             if task.finishtime is None or timestamp > task.finishtime:
                 task.finishtime = timestamp
 
-            if self.starttime is None or timestamp < task.starttime:
+            if self.starttime is None or timestamp < self.starttime:
                 self.starttime = timestamp
-            if self.finishtime is None or timestamp < task.finishtime:
+            if self.finishtime is None or timestamp > self.finishtime:
                 self.finishtime = timestamp
 
             if (not self.detected_crunch1) and task.starttime is not None and task.finishtime is not None:
@@ -207,17 +207,18 @@ class Summarizer(object):
                     stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)
                 if 'tx' in stats or 'rx' in stats:
                     stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)
-                for stat, val in stats.items():
-                    if group == 'interval':
-                        if stat == 'seconds':
-                            this_interval_s = val
-                            continue
-                        elif not (this_interval_s > 0):
+                if group == 'interval':
+                    if 'seconds' in stats:
+                        this_interval_s = stats.get('seconds',0)
+                        del stats['seconds']
+                        if this_interval_s <= 0:
                             logger.error(
                                 "BUG? interval stat given with duration {!r}".
                                 format(this_interval_s))
-                            continue
-                        else:
+                    else:
+                        logger.error('BUG? interval stat missing duration')
+                for stat, val in stats.items():
+                    if group == 'interval' and this_interval_s:
                             stat = stat + '__rate'
                             val = val / this_interval_s
                             if stat in ['user+sys__rate', 'tx+rx__rate']:
diff --git a/tools/keep-xref/keep-xref.py b/tools/keep-xref/keep-xref.py
new file mode 100755 (executable)
index 0000000..7bc4158
--- /dev/null
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+#
+
"""Report collections and container requests affected by missing blocks.

Given a list of collections missing blocks (as produced by keep-balance),
produce a CSV report listing affected collections and container requests.
"""

from __future__ import print_function, absolute_import

import argparse
import csv
import logging
import sys

import arvados
import arvados.util

# Log one level above INFO so progress messages show up with the default
# basicConfig level below, while ordinary INFO noise is suppressed.
lglvl = logging.INFO+1
logging.basicConfig(level=lglvl, format='%(message)s')
+
def rerun_request(arv, container_requests_to_rerun, ct):
    """Collect the top-level container requests that led to container ct.

    Walks up the chain of requesting containers; requests with no
    requesting container (i.e. submitted directly by a user) are added to
    the container_requests_to_rerun dict, keyed by request uuid.
    """
    filters = [["container_uuid", "=", ct["uuid"]]]
    for cr in arvados.util.list_all(arv.container_requests().list, filters=filters):
        parent_uuid = cr["requesting_container_uuid"]
        if parent_uuid:
            parent = arv.containers().get(uuid=parent_uuid).execute()
            rerun_request(arv, container_requests_to_rerun, parent)
        else:
            container_requests_to_rerun[cr["uuid"]] = cr
+
def get_owner(arv, owners, record):
    """Return (owner name, root owner uuid) for record's owner_uuid.

    Results are memoized in the owners dict.  A "tpzed" (user) uuid is its
    own root; for a group, the root is found by recursing on the group's
    own owner.
    """
    owner_uuid = record["owner_uuid"]
    cached = owners.get(owner_uuid)
    if cached is not None:
        return cached
    if owner_uuid[6:11] == "tpzed":
        user = arv.users().get(uuid=owner_uuid).execute()
        entry = (user["full_name"], owner_uuid)
    else:
        group = arv.groups().get(uuid=owner_uuid).execute()
        root = get_owner(arv, owners, group)[1]
        entry = (group["name"], root)
    owners[owner_uuid] = entry
    return entry
+
def main():
    """Generate a CSV report of the collections and container requests
    affected by missing blocks.

    The single positional argument is a keep-balance "lost blocks" file:
    each line is a block id followed by the portable data hashes of the
    collections referencing that block.  The report is written to stdout.
    """
    parser = argparse.ArgumentParser(description='Re-run containers associated with missing blocks')
    parser.add_argument('inp')
    args = parser.parse_args()

    arv = arvados.api('v1')

    logging.log(lglvl, "Reading %s", args.inp)

    # Collect the portable data hashes of the damaged collections.
    # The first field of each line is the block id; skip it.
    busted_collections = set()
    with open(args.inp, "rt") as blocksfile:
        for line in blocksfile:
            busted_collections.update(line.rstrip().split(" ")[1:])

    out = csv.writer(sys.stdout)
    out.writerow(("collection uuid", "container request uuid", "record name", "modified at", "owner uuid", "owner name", "root owner uuid", "root owner name", "notes"))

    logging.log(lglvl, "Finding collections")

    owners = {}                       # cache: owner_uuid -> (name, root owner uuid)
    container_requests_to_rerun = {}  # request uuid -> container request record
    # For each damaged collection, report every matching collection record
    # and find the top-level container requests that produced it.
    for i, b in enumerate(busted_collections):
        if (i % 100) == 0:
            logging.log(lglvl, "%d/%d", i, len(busted_collections))
        for d in arvados.util.list_all(arv.collections().list, filters=[["portable_data_hash", "=", b]]):
            t = ""
            # Only "output" and "log" collections are expected here; note
            # anything else in the report's "notes" column.
            if d["properties"].get("type") not in ("output", "log"):
                t = "\"type\" was '%s', expected one of 'output' or 'log'" % d["properties"].get("type")
            ou = get_owner(arv, owners, d)
            out.writerow((d["uuid"], "", d["name"], d["modified_at"], d["owner_uuid"], ou[0], ou[1], owners[ou[1]][0], t))

        for ct in arvados.util.list_all(arv.containers().list, filters=[["output", "=", b]]):
            rerun_request(arv, container_requests_to_rerun, ct)

    logging.log(lglvl, "%d/%d", len(busted_collections), len(busted_collections))
    logging.log(lglvl, "Finding container requests")

    for i, cr in enumerate(container_requests_to_rerun.values()):
        if (i % 100) == 0:
            logging.log(lglvl, "%d/%d", i, len(container_requests_to_rerun))
        ou = get_owner(arv, owners, cr)
        out.writerow(("", cr["uuid"], cr["name"], cr["modified_at"], cr["owner_uuid"], ou[0], ou[1], owners[ou[1]][0], ""))

    logging.log(lglvl, "%d/%d", len(container_requests_to_rerun), len(container_requests_to_rerun))

if __name__ == "__main__":
    main()