From: Eric Biagiotti Date: Fri, 19 Apr 2019 14:59:24 +0000 (-0400) Subject: Merge branch 'master' into 14723-cwl-multiple-file-targets X-Git-Tag: 1.4.0~62^2 X-Git-Url: https://git.arvados.org/arvados.git/commitdiff_plain/454ee2b8f0385c542b6f1165a3baf2820425e1a3?hp=25107815e9da9483bb7e6055379420113bfc3640 Merge branch 'master' into 14723-cwl-multiple-file-targets refs #14723 Arvados-DCO-1.1-Signed-off-by: Eric Biagiotti --- diff --git a/.licenseignore b/.licenseignore index 45028bf888..405fe8ddf9 100644 --- a/.licenseignore +++ b/.licenseignore @@ -13,6 +13,7 @@ build/package-test-dockerfiles/ubuntu1604/etc-apt-preferences.d-arvados *by-sa-3.0.txt *COPYING doc/fonts/* +doc/_includes/_config_default_yml.liquid doc/user/cwl/federated/* */docker_image docker/jobs/apt.arvados.org*.list @@ -59,6 +60,7 @@ sdk/pam/examples/shellinabox sdk/pam/pam-configs/arvados sdk/python/tests/data/* services/api/config/unbound.template +services/api/config/config.default.yml services/arv-web/sample-cgi-app/public/.htaccess services/arv-web/sample-cgi-app/public/index.cgi services/keepproxy/pkg-extras/etc/default/keepproxy diff --git a/build/run-library.sh b/build/run-library.sh index 1daceff239..01a6a06c14 100755 --- a/build/run-library.sh +++ b/build/run-library.sh @@ -352,6 +352,15 @@ handle_rails_package() { if [[ "$pkgname" != "arvados-workbench" ]]; then exclude_list+=('config/database.yml') fi + # for arvados-api-server, we need to dereference the + # config/config.default.yml file. There is no fpm way to do that, sadly + # (excluding the existing symlink and then adding the file from its source + # path doesn't work, sadly. 
+ if [[ "$pkgname" == "arvados-api-server" ]]; then + mv /arvados/services/api/config/config.default.yml /arvados/services/api/config/config.default.yml.bu + cp -p /arvados/lib/config/config.default.yml /arvados/services/api/config/ + exclude_list+=('config/config.default.yml.bu') + fi for exclude in ${exclude_list[@]}; do switches+=(-x "$exclude_root/$exclude") done @@ -359,6 +368,11 @@ handle_rails_package() { -x "$exclude_root/vendor/cache-*" \ -x "$exclude_root/vendor/bundle" "$@" "$license_arg" rm -rf "$scripts_dir" + # Undo the deferencing we did above + if [[ "$pkgname" == "arvados-api-server" ]]; then + rm -f /arvados/services/api/config/config.default.yml + mv /arvados/services/api/config/config.default.yml.bu /arvados/services/api/config/config.default.yml + fi } # Build python packages with a virtualenv built-in diff --git a/build/run-tests.sh b/build/run-tests.sh index a37a0f731e..dbf13940b2 100755 --- a/build/run-tests.sh +++ b/build/run-tests.sh @@ -394,7 +394,7 @@ start_services() { return 0 fi . "$VENVDIR/bin/activate" - echo 'Starting API, keepproxy, keep-web, ws, arv-git-httpd, and nginx ssl proxy...' + echo 'Starting API, controller, keepproxy, keep-web, arv-git-httpd, ws, and nginx ssl proxy...' if [[ ! -d "$WORKSPACE/services/api/log" ]]; then mkdir -p "$WORKSPACE/services/api/log" fi @@ -749,6 +749,7 @@ do_test_once() { title "test $1" timer_reset + result= if which deactivate >/dev/null; then deactivate; fi if ! . "$VENVDIR/bin/activate" then @@ -821,6 +822,7 @@ do_install_once() { title "install $1" timer_reset + result= if which deactivate >/dev/null; then deactivate; fi if [[ "$1" != "env" ]] && ! . 
"$VENVDIR/bin/activate"; then result=1 diff --git a/doc/_config.yml b/doc/_config.yml index a5b53442ca..c7f9bd1978 100644 --- a/doc/_config.yml +++ b/doc/_config.yml @@ -154,8 +154,11 @@ navbar: admin: - Topics: - admin/index.html.textile.liquid + - Configuration: + - admin/config.html.textile.liquid - Upgrading and migrations: - admin/upgrading.html.textile.liquid + - admin/config-migration.html.textile.liquid - install/migrate-docker19.html.textile.liquid - admin/upgrade-crunch2.html.textile.liquid - Users and Groups: @@ -174,6 +177,8 @@ navbar: - Other: - admin/collection-versioning.html.textile.liquid - admin/federation.html.textile.liquid + - admin/controlling-container-reuse.html.textile.liquid + - admin/logs-table-management.html.textile.liquid installguide: - Overview: - install/index.html.textile.liquid diff --git a/doc/_includes/_config_default_yml.liquid b/doc/_includes/_config_default_yml.liquid new file mode 120000 index 0000000000..457d6fa63f --- /dev/null +++ b/doc/_includes/_config_default_yml.liquid @@ -0,0 +1 @@ +../../lib/config/config.default.yml \ No newline at end of file diff --git a/doc/admin/config-migration.html.textile.liquid b/doc/admin/config-migration.html.textile.liquid new file mode 100644 index 0000000000..11546c0323 --- /dev/null +++ b/doc/admin/config-migration.html.textile.liquid @@ -0,0 +1,50 @@ +--- +layout: default +navsection: admin +title: Migrating Configuration +... + +{% comment %} +Copyright (C) The Arvados Authors. All rights reserved. + +SPDX-License-Identifier: CC-BY-SA-3.0 +{% endcomment %} + +Arvados is migrating to a centralized configuration file for all components. The centralized Arvados configuration is @/etc/arvados/config.yml@. Components that support the new centralized configuration are listed below. Components not listed here do not yet support centralized configuration. 
During the migration period, legacy configuration files will continue to be loaded and take precedence over the centralized configuration file. + +h2. API server + +The legacy API server configuration is stored in @config/application.yml@ and @config/database.yml@. After migration to @/etc/arvados/config.yml@, both of these files should be moved out of the way and/or deleted. + +Change to the API server directory and use the following commands: + +
+$ bundle exec rake config:migrate > config.yml
+$ cp config.yml /etc/arvados/config.yml
+
+ +This will print the contents of @config.yml@ after merging with legacy @application.yml@. It may then be redirected to a file and copied to @/etc/arvados/config.yml@. + +If you wish to update @config.yml@ configuration by hand, or check that everything has been migrated, use @config:diff@ to print configuration items that differ between @application.yml@ and the system @config.yml@. + +
+$ bundle exec rake config:diff
+
+ +This command will also report if no migrations are required. + +h2. crunch-dispatch-slurm + +Currently only reads @InstanceTypes@ from centralized configuration. Still requires component-specific configuration file. + +h2. keepstore + +Currently only reads @RemoteClusters@ from centralized configuration. Still requires component-specific configuration file. + +h2. arvados-controller + +Only supports centralized config file. No migration needed. + +h2. arvados-dispatch-cloud + +Only supports centralized config file. No migration needed. diff --git a/doc/admin/config.html.textile.liquid b/doc/admin/config.html.textile.liquid new file mode 100644 index 0000000000..a1dcdb3bb8 --- /dev/null +++ b/doc/admin/config.html.textile.liquid @@ -0,0 +1,19 @@ +--- +layout: default +navsection: admin +title: Configuration reference +... + +{% comment %} +Copyright (C) The Arvados Authors. All rights reserved. + +SPDX-License-Identifier: CC-BY-SA-3.0 +{% endcomment %} + +The master Arvados configuration is stored at @/etc/arvados/config.yml@ + +See "Migrating Configuration":config-migration.html for information about migrating from legacy component-specific configuration files. + +{% codeblock as yaml %} +{% include 'config_default_yml' %} +{% endcodeblock %} diff --git a/doc/admin/controlling-container-reuse.html.textile.liquid b/doc/admin/controlling-container-reuse.html.textile.liquid new file mode 100644 index 0000000000..76f57f31a5 --- /dev/null +++ b/doc/admin/controlling-container-reuse.html.textile.liquid @@ -0,0 +1,21 @@ +--- +layout: default +navsection: admin +title: Controlling container reuse +... + +{% comment %} +Copyright (C) The Arvados Authors. All rights reserved. + +SPDX-License-Identifier: CC-BY-SA-3.0 +{% endcomment %} + +This page describes how an admin can control container reuse using the @arv@ command. This can be utilized to avoid reusing a completed container without disabling reuse for the corresponding steps in affected workflows. 
For example, if a container exited successfully but produced bad output, it may not be feasible to update the workflow immediately. Meanwhile, changing the state of the container from @Complete@ to @Cancelled@ will prevent it from being used in subsequent workflows. + +If a container is in the @Complete@ state, the following @arv@ command will change its state to @Cancelled@, where @xxxxx-xxxxx-xxxxxxxxxxxxxxx@ is the @UUID@ of the container: + +
arv container update -u xxxxx-xxxxx-xxxxxxxxxxxxxxx -c '{"state":"Cancelled"}'
+ +Use the following command to list all containers that exited with 0 and were then cancelled: + +
arv container list --filters='[["state", "=", "Cancelled"], ["exit_code", "=", 0]]'
See the "arv CLI tool overview":{{site.baseurl}}/sdk/cli/index.html for more details about using the @arv@ command. diff --git a/doc/admin/logs-table-management.html.textile.liquid b/doc/admin/logs-table-management.html.textile.liquid new file mode 100644 index 0000000000..dedd960f88 --- /dev/null +++ b/doc/admin/logs-table-management.html.textile.liquid @@ -0,0 +1,55 @@ +--- +layout: default +navsection: admin +title: "Logs table management" +... + +{% comment %} +Copyright (C) The Arvados Authors. All rights reserved. + +SPDX-License-Identifier: CC-BY-SA-3.0 +{% endcomment %} + +This page aims to provide insight about managing the ever growing API Server's logs table. + +h3. Logs table purpose & behavior + +This database table currently serves three purposes: +* It's an audit log, permitting admins and users to look up the time and details of past changes to Arvados objects via @arvados.v1.logs.*@ endpoints. +* It's a mechanism for passing cache-invalidation events, used by websocket servers, the Python SDK "events" library, and @arvados-cwl-runner@ to detect when an object has changed. +* It's a staging area for stdout/stderr text coming from users' containers, permitting users to see what their containers are doing while they are still running (i.e., before those text files are written to Keep). + +As a result, this table grows indefinitely, even on sites where policy does not require an audit log; making backups, migrations, and upgrades unnecessarily slow and painful. + +h3. API Server configuration + +To solve the problem mentioned above, the API server offers the possibility to limit the amount of log information stored on the table: + +
+# Time to keep audit logs (a row in the log table added each time an
+# Arvados object is created, modified, or deleted) in the PostgreSQL
+# database. Currently, websocket event notifications rely on audit
+# logs, so this should not be set lower than 600 (10 minutes).
+max_audit_log_age: 1209600
+
+ +...and to prevent surprises and avoid bad database behavior (especially the first time the cleanup job runs on an existing cluster with a huge backlog) a maximum number of rows to delete in a single transaction. + +
+# Maximum number of log rows to delete in a single SQL transaction.
+#
+# If max_audit_log_delete_batch is 0, log entries will never be
+# deleted by Arvados. Cleanup can be done by an external process
+# without affecting any Arvados system processes, as long as very
+# recent (<5 minutes old) logs are not deleted.
+#
+# 100000 is a reasonable batch size for most sites.
+max_audit_log_delete_batch: 0
+
+ +This feature works when both settings are non-zero, periodically dispatching a background task that deletes all log rows older than @max_audit_log_age@. +The events being cleaned up by this process don't include job/container stderr logs (they're handled by the existing @delete job/container logs@ rake tasks) + +h3. Additional consideration + +Depending on the local installation's audit requirements, the cluster admins should plan for an external backup procedure before enabling this feature, as this information is not replicated anywhere else. diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid index 6e2e6cba6d..09bef2a62a 100644 --- a/doc/admin/upgrading.html.textile.liquid +++ b/doc/admin/upgrading.html.textile.liquid @@ -125,6 +125,10 @@ h4. Centos7 package for libpam-arvados depends on the python-pam package, which As part of story "#9945":https://dev.arvados.org/issues/9945, it was discovered that the Centos7 package for libpam-arvados was missing a dependency on the python-pam package, which is available from the EPEL repository. The dependency has been added to the libpam-arvados package. This means that going forward, the EPEL repository will need to be enabled to install libpam-arvados on Centos7. +h4. New configuration + +Arvados is migrating to a centralized configuration file for all components. During the migration, legacy configuration files will continue to be loaded. See "Migrating Configuration":config-migration.html for details. + h3. v1.3.0 (2018-12-05) This release includes several database migrations, which will be executed automatically as part of the API server upgrade. On large Arvados installations, these migrations will take a while. We've seen the upgrade take 30 minutes or more on installations with a lot of collections. 
diff --git a/doc/api/methods/containers.html.textile.liquid b/doc/api/methods/containers.html.textile.liquid index f0ce8e362f..d59c66edc3 100644 --- a/doc/api/methods/containers.html.textile.liquid +++ b/doc/api/methods/containers.html.textile.liquid @@ -65,9 +65,11 @@ table(table table-bordered table-condensed). |Queued|Waiting for a dispatcher to lock it and try to run the container.|Locked, Cancelled| |Locked|A dispatcher has "taken" the container and is allocating resources for it. The container has not started yet.|Queued, Running, Cancelled| |Running|Resources have been allocated and the contained process has been started (or is about to start). Crunch-run _must_ set state to Running _before_ there is any possibility that user code will run in the container.|Complete, Cancelled| -|Complete|Container was running, and the contained process/command has exited.|-| +|Complete|Container was running, and the contained process/command has exited.|Cancelled| |Cancelled|The container did not run long enough to produce an exit code. This includes cases where the container didn't even start, cases where the container was interrupted/killed before it exited by itself (e.g., priority changed to 0), and cases where some problem prevented the system from capturing the contained process's exit status (exit code and output).|-| +See "Controlling container reuse":{{site.baseurl}}/admin/controlling-container-reuse.html for details about changing state from @Complete@ to @Cancelled@ + h2(#mount_types). {% include 'mount_types' %} h2(#runtime_constraints). {% include 'container_runtime_constraints' %} diff --git a/doc/install/arvbox.html.textile.liquid b/doc/install/arvbox.html.textile.liquid index 2d94d32ac5..64cc9c6f89 100644 --- a/doc/install/arvbox.html.textile.liquid +++ b/doc/install/arvbox.html.textile.liquid @@ -29,29 +29,32 @@ h2. Usage
 $ arvbox
-Arvados-in-a-box                      http://arvados.org
-
-start|run  [tag]  start arvbox container
-stop       stop arvbox container
-restart   stop, then run again
-status     print some information about current arvbox
-ip         print arvbox docker container ip address
-host       print arvbox published host
-shell      enter arvbox shell
-open       open arvbox workbench in a web browser
-root-cert  get copy of root certificate
-update   stop, pull latest image, run
-build    build arvbox Docker image
-reboot   stop, build arvbox Docker image, run
-rebuild  build arvbox Docker image, no layer cache
-reset      delete arvbox arvados data (be careful!)
-destroy    delete all arvbox code and data (be careful!)
-log  tail log of specified service
-ls   list directories inside arvbox
-cat    get contents of files inside arvbox
-pipe       run a bash script piped in from stdin
-sv   change state of service inside arvbox
-clone     clone an arvbox
+Arvados-in-a-box             https://doc.arvados.org/install/arvbox.html
+
+start|run <config> [tag]   start arvbox container
+stop               stop arvbox container
+restart <config>   stop, then run again
+status             print some information about current arvbox
+ip                 print arvbox docker container ip address
+host               print arvbox published host
+shell              enter shell as root
+ashell             enter shell as 'arvbox'
+psql               enter postgres console
+open               open arvbox workbench in a web browser
+root-cert          get copy of root certificate
+update  <config>   stop, pull latest image, run
+build   <config>   build arvbox Docker image
+reboot  <config>   stop, build arvbox Docker image, run
+rebuild <config>   build arvbox Docker image, no layer cache
+reset              delete arvbox arvados data (be careful!)
+destroy            delete all arvbox code and data (be careful!)
+log <service>      tail log of specified service
+ls <directory>       list directories inside arvbox
+cat <files>        get contents of files inside arvbox
+pipe               run a bash script piped in from stdin
+sv <start|stop|restart> <service>
+                   change state of service inside arvbox
+clone <from> <to>  clone dev arvbox
 
h2. Install root certificate @@ -85,7 +88,11 @@ Demo configuration. Boots a complete Arvados environment inside the container. h3. test -Run the test suite. +Starts postgres and initializes the API server, then runs the Arvados test suite. Will pass command line arguments to test runner. Supports test runner interactive mode. + +h3. devenv + +Starts a minimal container with no services and the host's $HOME bind mounted inside the container, then enters an interactive login shell. Intended to make it convenient to use tools installed in arvbox that don't require services. h3. publicdev diff --git a/doc/install/install-nodemanager.html.textile.liquid b/doc/install/install-nodemanager.html.textile.liquid index defec2589e..770527da1f 100644 --- a/doc/install/install-nodemanager.html.textile.liquid +++ b/doc/install/install-nodemanager.html.textile.liquid @@ -556,7 +556,7 @@ subscription_id = 00000000-0000-0000-0000-000000000000 # https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal/ # and updated for v2 of the Azure cli tool. # -# az ad app create --display-name "Node Manager" --homepage "https://arvados.org" --identifier-uris "https://" --password +# az ad app create --display-name "Node Manager" --homepage "https://arvados.org" --identifier-uris "https://" --password --end-date # az ad sp create "" # az role assignment create --assignee "" --role Owner --resource-group "" # diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml new file mode 100644 index 0000000000..bea6387532 --- /dev/null +++ b/lib/config/config.default.yml @@ -0,0 +1,455 @@ +# Copyright (C) The Arvados Authors. All rights reserved. +# +# SPDX-License-Identifier: AGPL-3.0 + +# Do not use this file for site configuration. Create +# /etc/arvados/config.yml instead. +# +# The order of precedence (highest to lowest): +# 1. Legacy component-specific config files (deprecated) +# 2. /etc/arvados/config.yml +# 3. 
config.default.yml + +Clusters: + xxxxx: + SystemRootToken: "" + + # Token to be included in all healthcheck requests. Disabled by default. + # Server expects request header of the format "Authorization: Bearer xxx" + ManagementToken: "" + + Services: + RailsAPI: + InternalURLs: {} + GitHTTP: + InternalURLs: {} + ExternalURL: "" + Keepstore: + InternalURLs: {} + Controller: + InternalURLs: {} + ExternalURL: "" + Websocket: + InternalURLs: {} + ExternalURL: "" + Keepbalance: + InternalURLs: {} + GitHTTP: + InternalURLs: {} + ExternalURL: "" + GitSSH: + ExternalURL: "" + DispatchCloud: + InternalURLs: {} + SSO: + ExternalURL: "" + Keepproxy: + InternalURLs: {} + ExternalURL: "" + WebDAV: + InternalURLs: {} + ExternalURL: "" + WebDAVDownload: + InternalURLs: {} + ExternalURL: "" + Keepstore: + InternalURLs: {} + Composer: + ExternalURL: "" + WebShell: + ExternalURL: "" + Workbench1: + InternalURLs: {} + ExternalURL: "" + Workbench2: + ExternalURL: "" + PostgreSQL: + # max concurrent connections per arvados server daemon + ConnectionPool: 32 + Connection: + # All parameters here are passed to the PG client library in a connection string; + # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS + Host: "" + Port: 0 + User: "" + Password: "" + DBName: "" + API: + # Maximum size (in bytes) allowed for a single API request. This + # limit is published in the discovery document for use by clients. + # Note: You must separately configure the upstream web server or + # proxy to actually enforce the desired maximum request size on the + # server side. + MaxRequestSize: 134217728 + + # Limit the number of bytes read from the database during an index + # request (by retrieving and returning fewer rows than would + # normally be returned in a single response). + # Note 1: This setting never reduces the number of returned rows to + # zero, no matter how big the first data row is. 
+ # Note 2: Currently, this is only checked against a specific set of + # columns that tend to get large (collections.manifest_text, + # containers.mounts, workflows.definition). Other fields (e.g., + # "properties" hashes) are not counted against this limit. + MaxIndexDatabaseRead: 134217728 + + # Maximum number of items to return when responding to a APIs that + # can return partial result sets using limit and offset parameters + # (e.g., *.index, groups.contents). If a request specifies a "limit" + # parameter higher than this value, this value is used instead. + MaxItemsPerResponse: 1000 + + # API methods to disable. Disabled methods are not listed in the + # discovery document, and respond 404 to all requests. + # Example: ["jobs.create", "pipeline_instances.create"] + DisabledAPIs: [] + + # Interval (seconds) between asynchronous permission view updates. Any + # permission-updating API called with the 'async' parameter schedules a an + # update on the permission view in the future, if not already scheduled. + AsyncPermissionsUpdateInterval: 20 + + # RailsSessionSecretToken is a string of alphanumeric characters + # used by Rails to sign session tokens. IMPORTANT: This is a + # site secret. It should be at least 50 characters. + RailsSessionSecretToken: "" + + Users: + # Config parameters to automatically setup new users. If enabled, + # this users will be able to self-activate. Enable this if you want + # to run an open instance where anyone can create an account and use + # the system without requiring manual approval. + # + # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on. + # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup. 
+ AutoSetupNewUsers: false + AutoSetupNewUsersWithVmUUID: "" + AutoSetupNewUsersWithRepository: false + AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog] + + # When new_users_are_active is set to true, new users will be active + # immediately. This skips the "self-activate" step which enforces + # user agreements. Should only be enabled for development. + NewUsersAreActive: false + + # The e-mail address of the user you would like to become marked as an admin + # user on their first login. + # In the default configuration, authentication happens through the Arvados SSO + # server, which uses OAuth2 against Google's servers, so in that case this + # should be an address associated with a Google account. + AutoAdminUserWithEmail: "" + + # If auto_admin_first_user is set to true, the first user to log in when no + # other admin users exist will automatically become an admin user. + AutoAdminFirstUser: false + + # Email address to notify whenever a user creates a profile for the + # first time + UserProfileNotificationAddress: "" + AdminNotifierEmailFrom: arvados@example.com + EmailSubjectPrefix: "[ARVADOS] " + UserNotifierEmailFrom: arvados@example.com + NewUserNotificationRecipients: [] + NewInactiveUserNotificationRecipients: [] + + AuditLogs: + # Time to keep audit logs, in seconds. (An audit log is a row added + # to the "logs" table in the PostgreSQL database each time an + # Arvados object is created, modified, or deleted.) + # + # Currently, websocket event notifications rely on audit logs, so + # this should not be set lower than 600 (5 minutes). + MaxAge: 1209600 + + # Maximum number of log rows to delete in a single SQL transaction. + # + # If max_audit_log_delete_batch is 0, log entries will never be + # deleted by Arvados. Cleanup can be done by an external process + # without affecting any Arvados system processes, as long as very + # recent (<5 minutes old) logs are not deleted. 
+ # + # 100000 is a reasonable batch size for most sites. + MaxDeleteBatch: 0 + + # Attributes to suppress in events and audit logs. Notably, + # specifying ["manifest_text"] here typically makes the database + # smaller and faster. + # + # Warning: Using any non-empty value here can have undesirable side + # effects for any client or component that relies on event logs. + # Use at your own risk. + UnloggedAttributes: [] + + SystemLogs: + # Maximum characters of (JSON-encoded) query parameters to include + # in each request log entry. When params exceed this size, they will + # be JSON-encoded, truncated to this size, and logged as + # params_truncated. + MaxRequestLogParamsSize: 2000 + + Collections: + # Allow clients to create collections by providing a manifest with + # unsigned data blob locators. IMPORTANT: This effectively disables + # access controls for data stored in Keep: a client who knows a hash + # can write a manifest that references the hash, pass it to + # collections.create (which will create a permission link), use + # collections.get to obtain a signature for that data locator, and + # use that signed locator to retrieve the data from Keep. Therefore, + # do not turn this on if your users expect to keep data private from + # one another! + BlobSigning: true + + # blob_signing_key is a string of alphanumeric characters used to + # generate permission signatures for Keep locators. It must be + # identical to the permission key given to Keep. IMPORTANT: This is + # a site secret. It should be at least 50 characters. + # + # Modifying blob_signing_key will invalidate all existing + # signatures, which can cause programs to fail (e.g., arv-put, + # arv-get, and Crunch jobs). To avoid errors, rotate keys only when + # no such processes are running. + BlobSigningKey: "" + + # Default replication level for collections. This is used when a + # collection's replication_desired attribute is nil. 
+ DefaultReplication: 2 + + # Lifetime (in seconds) of blob permission signatures generated by + # the API server. This determines how long a client can take (after + # retrieving a collection record) to retrieve the collection data + # from Keep. If the client needs more time than that (assuming the + # collection still has the same content and the relevant user/token + # still has permission) the client can retrieve the collection again + # to get fresh signatures. + # + # This must be exactly equal to the -blob-signature-ttl flag used by + # keepstore servers. Otherwise, reading data blocks and saving + # collections will fail with HTTP 403 permission errors. + # + # Modifying blob_signature_ttl invalidates existing signatures; see + # blob_signing_key note above. + # + # The default is 2 weeks. + BlobSigningTTL: 1209600 + + # Default lifetime for ephemeral collections: 2 weeks. This must not + # be less than blob_signature_ttl. + DefaultTrashLifetime: 1209600 + + # Interval (seconds) between trash sweeps. During a trash sweep, + # collections are marked as trash if their trash_at time has + # arrived, and deleted if their delete_at time has arrived. + TrashSweepInterval: 60 + + # If true, enable collection versioning. + # When a collection's preserve_version field is true or the current version + # is older than the amount of seconds defined on preserve_version_if_idle, + # a snapshot of the collection's previous state is created and linked to + # the current collection. + CollectionVersioning: false + + # 0 = auto-create a new version on every update. + # -1 = never auto-create new versions. + # > 0 = auto-create a new version when older than the specified number of seconds. + PreserveVersionIfIdle: -1 + + Login: + # These settings are provided by your OAuth2 provider (e.g., + # sso-provider). + ProviderAppSecret: "" + ProviderAppID: "" + + Git: + # Git repositories must be readable by api server, or you won't be + # able to submit crunch jobs. 
To pass the test suites, put a clone + # of the arvados tree in {git_repositories_dir}/arvados.git or + # {git_repositories_dir}/arvados/.git + Repositories: /var/lib/arvados/git/repositories + + TLS: + Insecure: false + + Containers: + # List of supported Docker Registry image formats that compute nodes + # are able to use. `arv keep docker` will error out if a user tries + # to store an image with an unsupported format. Use an empty array + # to skip the compatibility check (and display a warning message to + # that effect). + # + # Example for sites running docker < 1.10: ["v1"] + # Example for sites running docker >= 1.10: ["v2"] + # Example for disabling check: [] + SupportedDockerImageFormats: ["v2"] + + # Include details about job reuse decisions in the server log. This + # causes additional database queries to run, so it should not be + # enabled unless you expect to examine the resulting logs for + # troubleshooting purposes. + LogReuseDecisions: false + + # Default value for keep_cache_ram of a container's runtime_constraints. + DefaultKeepCacheRAM: 268435456 + + # Number of times a container can be unlocked before being + # automatically cancelled. + MaxDispatchAttempts: 5 + + # Default value for container_count_max for container requests. This is the + # number of times Arvados will create a new container to satisfy a container + # request. If a container is cancelled it will retry a new container if + # container_count < container_count_max on any container requests associated + # with the cancelled container. + MaxRetryAttempts: 3 + + # The maximum number of compute nodes that can be in use simultaneously + # If this limit is reduced, any existing nodes with slot number >= new limit + # will not be counted against the new limit. In other words, the new limit + # won't be strictly enforced until those nodes with higher slot numbers + # go down. + MaxComputeVMs: 64 + + # Preemptible instance support (e.g. 
AWS Spot Instances) + # When true, child containers will get created with the preemptible + # scheduling parameter parameter set. + UsePreemptibleInstances: false + + # Include details about job reuse decisions in the server log. This + # causes additional database queries to run, so it should not be + # enabled unless you expect to examine the resulting logs for + # troubleshooting purposes. + LogReuseDecisions: false + + Logging: + # When you run the db:delete_old_container_logs task, it will find + # containers that have been finished for at least this many seconds, + # and delete their stdout, stderr, arv-mount, crunch-run, and + # crunchstat logs from the logs table. + MaxAge: 720h + + # These two settings control how frequently log events are flushed to the + # database. Log lines are buffered until either crunch_log_bytes_per_event + # has been reached or crunch_log_seconds_between_events has elapsed since + # the last flush. + LogBytesPerEvent: 4096 + LogSecondsBetweenEvents: 1 + + # The sample period for throttling logs, in seconds. + LogThrottlePeriod: 60 + + # Maximum number of bytes that job can log over crunch_log_throttle_period + # before being silenced until the end of the period. + LogThrottleBytes: 65536 + + # Maximum number of lines that job can log over crunch_log_throttle_period + # before being silenced until the end of the period. + LogThrottleLines: 1024 + + # Maximum bytes that may be logged by a single job. Log bytes that are + # silenced by throttling are not counted against this total. + LimitLogBytesPerJob: 67108864 + + LogPartialLineThrottlePeriod: 5 + + # Container logs are written to Keep and saved in a collection, + # which is updated periodically while the container runs. This + # value sets the interval (given in seconds) between collection + # updates. + LogUpdatePeriod: 1800 + + # The log collection is also updated when the specified amount of + # log data (given in bytes) is produced in less than one update + # period. 
+ LogUpdateSize: 33554432 + + SLURM: + Managed: + # Path to dns server configuration directory + # (e.g. /etc/unbound.d/conf.d). If false, do not write any config + # files or touch restart.txt (see below). + DNSServerConfDir: "" + + # Template file for the dns server host snippets. See + # unbound.template in this directory for an example. If false, do + # not write any config files. + DNSServerConfTemplate: "" + + # String to write to {dns_server_conf_dir}/restart.txt (with a + # trailing newline) after updating local data. If false, do not + # open or write the restart.txt file. + DNSServerReloadCommand: "" + + # Command to run after each DNS update. Template variables will be + # substituted; see the "unbound" example below. If false, do not run + # a command. + DNSServerUpdateCommand: "" + + ComputeNodeDomain: "" + ComputeNodeNameservers: + - 192.168.1.1 + + # Hostname to assign to a compute node when it sends a "ping" and the + # hostname in its Node record is nil. + # During bootstrapping, the "ping" script is expected to notice the + # hostname given in the ping response, and update its unix hostname + # accordingly. + # If false, leave the hostname alone (this is appropriate if your compute + # nodes' hostnames are already assigned by some other mechanism). + # + # One way or another, the hostnames of your node records should agree + # with your DNS records and your /etc/slurm-llnl/slurm.conf files. + # + # Example for compute0000, compute0001, ....: + # assign_node_hostname: compute%04d + # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.) + AssignNodeHostname: "compute%d" + + JobsAPI: + # Enable the legacy Jobs API. This value must be a string. + # 'auto' -- (default) enable the Jobs API only if it has been used before + # (i.e., there are job records in the database) + # 'true' -- enable the Jobs API despite lack of existing records. + # 'false' -- disable the Jobs API despite presence of existing records. 
+ Enable: 'auto' + + # Git repositories must be readable by api server, or you won't be + # able to submit crunch jobs. To pass the test suites, put a clone + # of the arvados tree in {git_repositories_dir}/arvados.git or + # {git_repositories_dir}/arvados/.git + GitInternalDir: /var/lib/arvados/internal.git + + # Docker image to be used when none found in runtime_constraints of a job + DefaultDockerImage: "" + + # none or slurm_immediate + CrunchJobWrapper: none + + # username, or false = do not set uid when running jobs. + CrunchJobUser: crunch + + # The web service must be able to create/write this file, and + # crunch-job must be able to stat() it. + CrunchRefreshTrigger: /tmp/crunch_refresh_trigger + + # Control job reuse behavior when two completed jobs match the + # search criteria and have different outputs. + # + # If true, in case of a conflict, reuse the earliest job (this is + # similar to container reuse behavior). + # + # If false, in case of a conflict, do not reuse any completed job, + # but do reuse an already-running job if available (this is the + # original job reuse behavior, and is still the default). 
+ ReuseJobIfOutputsDiffer: false + + Mail: + MailchimpAPIKey: "" + MailchimpListID: "" + SendUserSetupNotificationEmail: "" + IssueReporterEmailFrom: "" + IssueReporterEmailTo: "" + SupportEmailAddress: "" + EmailFrom: "" + RemoteClusters: + "*": + Proxy: false + ActivateUsers: false diff --git a/lib/dispatchcloud/container/queue_test.go b/lib/dispatchcloud/container/queue_test.go index 3c63fe51e6..daf7977ad5 100644 --- a/lib/dispatchcloud/container/queue_test.go +++ b/lib/dispatchcloud/container/queue_test.go @@ -74,6 +74,7 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) { defer wg.Done() err := cq.Unlock(uuid) c.Check(err, check.NotNil) + c.Check(err, check.ErrorMatches, ".*cannot unlock when Queued*.") err = cq.Lock(uuid) c.Check(err, check.IsNil) @@ -101,9 +102,6 @@ func (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) { }() } wg.Wait() - - err = cq.Cancel(arvadostest.CompletedContainerUUID) - c.Check(err, check.ErrorMatches, `.*State cannot change from Complete to Cancelled.*`) } func (suite *IntegrationSuite) TestCancelIfNoInstanceType(c *check.C) { diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb index b613e97a10..e1cb1ba8ef 100644 --- a/services/api/app/controllers/application_controller.rb +++ b/services/api/app/controllers/application_controller.rb @@ -53,8 +53,6 @@ class ApplicationController < ActionController::Base before_action(:render_404_if_no_object, except: [:index, :create] + ERROR_ACTIONS) - theme Rails.configuration.arvados_theme - attr_writer :resource_attrs begin @@ -83,14 +81,11 @@ class ApplicationController < ActionController::Base def default_url_options options = {} - if Rails.configuration.host - options[:host] = Rails.configuration.host - end - if Rails.configuration.port - options[:port] = Rails.configuration.port - end - if Rails.configuration.protocol - options[:protocol] = Rails.configuration.protocol + if 
Rails.configuration.Services.Controller.ExternalURL != "" + exturl = URI.parse(Rails.configuration.Services.Controller.ExternalURL) + options[:host] = exturl.host + options[:port] = exturl.port + options[:protocol] = exturl.scheme end options end @@ -306,7 +301,7 @@ class ApplicationController < ActionController::Base limit_query.each do |record| new_limit += 1 read_total += record.read_length.to_i - if read_total >= Rails.configuration.max_index_database_read + if read_total >= Rails.configuration.API.MaxIndexDatabaseRead new_limit -= 1 if new_limit > 1 @limit = new_limit break @@ -419,8 +414,7 @@ class ApplicationController < ActionController::Base end def disable_api_methods - if Rails.configuration.disable_api_methods. - include?(controller_name + "." + action_name) + if Rails.configuration.API.DisabledAPIs.include?(controller_name + "." + action_name) send_error("Disabled", status: 404) end end diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb index f7db1ef121..1004f07021 100644 --- a/services/api/app/controllers/arvados/v1/groups_controller.rb +++ b/services/api/app/controllers/arvados/v1/groups_controller.rb @@ -191,7 +191,7 @@ class Arvados::V1::GroupsController < ApplicationController table_names = Hash[klasses.collect { |k| [k, k.table_name] }] - disabled_methods = Rails.configuration.disable_api_methods + disabled_methods = Rails.configuration.API.DisabledAPIs avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')} klasses = avail_klasses.keys diff --git a/services/api/app/controllers/arvados/v1/healthcheck_controller.rb b/services/api/app/controllers/arvados/v1/healthcheck_controller.rb index c12bc6e90c..6c38224376 100644 --- a/services/api/app/controllers/arvados/v1/healthcheck_controller.rb +++ b/services/api/app/controllers/arvados/v1/healthcheck_controller.rb @@ -19,7 +19,7 @@ class Arvados::V1::HealthcheckController < 
ApplicationController mgmt_token = Rails.configuration.ManagementToken auth_header = request.headers['Authorization'] - if !mgmt_token + if mgmt_token == "" send_json ({"errors" => "disabled"}), status: 404 elsif !auth_header send_json ({"errors" => "authorization required"}), status: 401 diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb index 8ff2a97c46..13e47f76cd 100644 --- a/services/api/app/controllers/arvados/v1/schema_controller.rb +++ b/services/api/app/controllers/arvados/v1/schema_controller.rb @@ -25,6 +25,8 @@ class Arvados::V1::SchemaController < ApplicationController def discovery_doc Rails.cache.fetch 'arvados_v1_rest_discovery' do Rails.application.eager_load! + remoteHosts = {} + Rails.configuration.RemoteClusters.each {|k,v| if k != "*" then remoteHosts[k] = v["Host"] end } discovery = { kind: "discovery#restDescription", discoveryVersion: "v1", @@ -39,41 +41,34 @@ class Arvados::V1::SchemaController < ApplicationController title: "Arvados API", description: "The API to interact with Arvados.", documentationLink: "http://doc.arvados.org/api/index.html", - defaultCollectionReplication: Rails.configuration.default_collection_replication, + defaultCollectionReplication: Rails.configuration.Collections.DefaultReplication, protocol: "rest", baseUrl: root_url + "arvados/v1/", basePath: "/arvados/v1/", rootUrl: root_url, servicePath: "arvados/v1/", batchPath: "batch", - uuidPrefix: Rails.application.config.uuid_prefix, - defaultTrashLifetime: Rails.application.config.default_trash_lifetime, - blobSignatureTtl: Rails.application.config.blob_signature_ttl, - maxRequestSize: Rails.application.config.max_request_size, - maxItemsPerResponse: Rails.application.config.max_items_per_response, - dockerImageFormats: Rails.application.config.docker_image_formats, - crunchLogBytesPerEvent: Rails.application.config.crunch_log_bytes_per_event, - crunchLogSecondsBetweenEvents: 
Rails.application.config.crunch_log_seconds_between_events, - crunchLogThrottlePeriod: Rails.application.config.crunch_log_throttle_period, - crunchLogThrottleBytes: Rails.application.config.crunch_log_throttle_bytes, - crunchLogThrottleLines: Rails.application.config.crunch_log_throttle_lines, - crunchLimitLogBytesPerJob: Rails.application.config.crunch_limit_log_bytes_per_job, - crunchLogPartialLineThrottlePeriod: Rails.application.config.crunch_log_partial_line_throttle_period, - crunchLogUpdatePeriod: Rails.application.config.crunch_log_update_period, - crunchLogUpdateSize: Rails.application.config.crunch_log_update_size, - remoteHosts: Rails.configuration.remote_hosts, - remoteHostsViaDNS: Rails.configuration.remote_hosts_via_dns, - websocketUrl: Rails.application.config.websocket_address, - workbenchUrl: Rails.application.config.workbench_address, - keepWebServiceUrl: Rails.application.config.keep_web_service_url, - gitUrl: case Rails.application.config.git_repo_https_base - when false - '' - when true - 'https://git.%s.arvadosapi.com/' % Rails.configuration.uuid_prefix - else - Rails.application.config.git_repo_https_base - end, + uuidPrefix: Rails.configuration.ClusterID, + defaultTrashLifetime: Rails.configuration.Collections.DefaultTrashLifetime, + blobSignatureTtl: Rails.configuration.Collections.BlobSigningTTL, + maxRequestSize: Rails.configuration.API.MaxRequestSize, + maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse, + dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats, + crunchLogBytesPerEvent: Rails.configuration.Containers.Logging.LogBytesPerEvent, + crunchLogSecondsBetweenEvents: Rails.configuration.Containers.Logging.LogSecondsBetweenEvents, + crunchLogThrottlePeriod: Rails.configuration.Containers.Logging.LogThrottlePeriod, + crunchLogThrottleBytes: Rails.configuration.Containers.Logging.LogThrottleBytes, + crunchLogThrottleLines: Rails.configuration.Containers.Logging.LogThrottleLines, + 
crunchLimitLogBytesPerJob: Rails.configuration.Containers.Logging.LimitLogBytesPerJob, + crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod, + crunchLogUpdatePeriod: Rails.configuration.Containers.Logging.LogUpdatePeriod, + crunchLogUpdateSize: Rails.configuration.Containers.Logging.LogUpdateSize, + remoteHosts: remoteHosts, + remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"].Proxy, + websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s, + workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s, + keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s, + gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s, parameters: { alt: { type: "string", @@ -405,7 +400,7 @@ class Arvados::V1::SchemaController < ApplicationController end end end - Rails.configuration.disable_api_methods.each do |method| + Rails.configuration.API.DisabledAPIs.each do |method| ctrl, action = method.split('.', 2) discovery[:resources][ctrl][:methods].delete(action.to_sym) end diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb index b421f54596..7365577871 100644 --- a/services/api/app/controllers/static_controller.rb +++ b/services/api/app/controllers/static_controller.rb @@ -12,8 +12,8 @@ class StaticController < ApplicationController def home respond_to do |f| f.html do - if Rails.configuration.workbench_address - redirect_to Rails.configuration.workbench_address + if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.empty? + redirect_to Rails.configuration.Services.Workbench1.ExternalURL else render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead." 
end diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb index 237156f116..6e18cdd460 100644 --- a/services/api/app/controllers/user_sessions_controller.rb +++ b/services/api/app/controllers/user_sessions_controller.rb @@ -52,7 +52,7 @@ class UserSessionsController < ApplicationController :first_name => omniauth['info']['first_name'], :last_name => omniauth['info']['last_name'], :identity_url => omniauth['info']['identity_url'], - :is_active => Rails.configuration.new_users_are_active, + :is_active => Rails.configuration.Users.NewUsersAreActive, :owner_uuid => system_user_uuid) if omniauth['info']['username'] user.set_initial_username(requested: omniauth['info']['username']) @@ -120,7 +120,7 @@ class UserSessionsController < ApplicationController flash[:notice] = 'You have logged off' return_to = params[:return_to] || root_url - redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}" + redirect_to "#{Rails.configuration.Services.SSO.ExternalURL}/users/sign_out?redirect_uri=#{CGI.escape return_to}" end # login - Just bounce to /auth/joshid. The only purpose of this function is diff --git a/services/api/app/mailers/admin_notifier.rb b/services/api/app/mailers/admin_notifier.rb index 87a5699f49..45e329030f 100644 --- a/services/api/app/mailers/admin_notifier.rb +++ b/services/api/app/mailers/admin_notifier.rb @@ -5,32 +5,32 @@ class AdminNotifier < ActionMailer::Base include AbstractController::Callbacks - default from: Rails.configuration.admin_notifier_email_from + default from: Rails.configuration.Users.AdminNotifierEmailFrom def new_user(user) @user = user - if not Rails.configuration.new_user_notification_recipients.empty? then - @recipients = Rails.configuration.new_user_notification_recipients + if not Rails.configuration.Users.NewUserNotificationRecipients.empty? 
then + @recipients = Rails.configuration.Users.NewUserNotificationRecipients logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)" add_to_subject = '' - if Rails.configuration.auto_setup_new_users + if Rails.configuration.Users.AutoSetupNewUsers add_to_subject = @user.is_invited ? ' and setup' : ', but not setup' end mail(to: @recipients, - subject: "#{Rails.configuration.email_subject_prefix}New user created#{add_to_subject} notification" + subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New user created#{add_to_subject} notification" ) end end def new_inactive_user(user) @user = user - if not Rails.configuration.new_inactive_user_notification_recipients.empty? then - @recipients = Rails.configuration.new_inactive_user_notification_recipients + if not Rails.configuration.Users.NewInactiveUserNotificationRecipients.empty? then + @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)" mail(to: @recipients, - subject: "#{Rails.configuration.email_subject_prefix}New inactive user notification" + subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification" ) end end diff --git a/services/api/app/mailers/profile_notifier.rb b/services/api/app/mailers/profile_notifier.rb index 8c0c5ec863..849eefe8e1 100644 --- a/services/api/app/mailers/profile_notifier.rb +++ b/services/api/app/mailers/profile_notifier.rb @@ -3,7 +3,7 @@ # SPDX-License-Identifier: AGPL-3.0 class ProfileNotifier < ActionMailer::Base - default from: Rails.configuration.admin_notifier_email_from + default from: Rails.configuration.Users.AdminNotifierEmailFrom def profile_created(user, address) @user = user diff --git a/services/api/app/mailers/user_notifier.rb b/services/api/app/mailers/user_notifier.rb index 5fb7036bf2..3d1b91f20e 100644 --- 
a/services/api/app/mailers/user_notifier.rb +++ b/services/api/app/mailers/user_notifier.rb @@ -5,7 +5,7 @@ class UserNotifier < ActionMailer::Base include AbstractController::Callbacks - default from: Rails.configuration.user_notifier_email_from + default from: Rails.configuration.Users.UserNotifierEmailFrom def account_is_setup(user) @user = user diff --git a/services/api/app/models/api_client_authorization.rb b/services/api/app/models/api_client_authorization.rb index 38538cb4ff..69d2945fc1 100644 --- a/services/api/app/models/api_client_authorization.rb +++ b/services/api/app/models/api_client_authorization.rb @@ -87,14 +87,14 @@ class ApiClientAuthorization < ArvadosModel end def self.remote_host(uuid_prefix:) - Rails.configuration.remote_hosts[uuid_prefix] || - (Rails.configuration.remote_hosts_via_dns && + Rails.configuration.RemoteClusters[uuid_prefix].Host || + (Rails.configuration.RemoteClusters["*"].Proxy && uuid_prefix+".arvadosapi.com") end def self.validate(token:, remote: nil) return nil if !token - remote ||= Rails.configuration.uuid_prefix + remote ||= Rails.configuration.ClusterID case token[0..2] when 'v2/' @@ -134,7 +134,7 @@ class ApiClientAuthorization < ArvadosModel end uuid_prefix = uuid[0..4] - if uuid_prefix == Rails.configuration.uuid_prefix + if uuid_prefix == Rails.configuration.ClusterID # If the token were valid, we would have validated it above return nil elsif uuid_prefix.length != 5 @@ -153,7 +153,7 @@ class ApiClientAuthorization < ArvadosModel # [re]validate it. 
begin clnt = HTTPClient.new - if Rails.configuration.sso_insecure + if Rails.configuration.TLS.Insecure clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE else # Use system CA certificates @@ -164,7 +164,7 @@ class ApiClientAuthorization < ArvadosModel end remote_user = SafeJSON.load( clnt.get_content('https://' + host + '/arvados/v1/users/current', - {'remote' => Rails.configuration.uuid_prefix}, + {'remote' => Rails.configuration.ClusterID}, {'Authorization' => 'Bearer ' + token})) rescue => e Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}" @@ -187,8 +187,8 @@ class ApiClientAuthorization < ArvadosModel end end - if Rails.configuration.new_users_are_active || - Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4]) + if Rails.configuration.Users.NewUsersAreActive || + Rails.configuration.RemoteClusters[remote_user['uuid'][0..4]].andand["ActivateUsers"] # Update is_active to whatever it is at the remote end user.is_active = remote_user['is_active'] elsif !remote_user['is_active'] diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb index e619abe8c8..339bc9e23f 100644 --- a/services/api/app/models/arvados_model.rb +++ b/services/api/app/models/arvados_model.rb @@ -411,7 +411,7 @@ class ArvadosModel < ApplicationRecord end def logged_attributes - attributes.except(*Rails.configuration.unlogged_attributes) + attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes) end def self.full_text_searchable_columns @@ -735,7 +735,7 @@ class ArvadosModel < ApplicationRecord end def self.uuid_like_pattern - "#{Rails.configuration.uuid_prefix}-#{uuid_prefix}-_______________" + "#{Rails.configuration.ClusterID}-#{uuid_prefix}-_______________" end def self.uuid_regex @@ -814,8 +814,8 @@ class ArvadosModel < ApplicationRecord end def is_audit_logging_enabled? 
- return !(Rails.configuration.max_audit_log_age.to_i == 0 && - Rails.configuration.max_audit_log_delete_batch.to_i > 0) + return !(Rails.configuration.AuditLogs.MaxAge.to_i == 0 && + Rails.configuration.AuditLogs.MaxDeleteBatch.to_i > 0) end def log_start_state diff --git a/services/api/app/models/blob.rb b/services/api/app/models/blob.rb index 55a257856c..54a4f369d9 100644 --- a/services/api/app/models/blob.rb +++ b/services/api/app/models/blob.rb @@ -51,15 +51,15 @@ class Blob timestamp = opts[:expire] else timestamp = db_current_time.to_i + - (opts[:ttl] || Rails.configuration.blob_signature_ttl) + (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL) end timestamp_hex = timestamp.to_s(16) # => "53163cb4" - blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16) + blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16) # Generate a signature. signature = - generate_signature((opts[:key] or Rails.configuration.blob_signing_key), + generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey), blob_hash, opts[:api_token], timestamp_hex, blob_signature_ttl) blob_locator + '+A' + signature + '@' + timestamp_hex @@ -103,10 +103,10 @@ class Blob if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i) raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.' 
end - blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16) + blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16) my_signature = - generate_signature((opts[:key] or Rails.configuration.blob_signing_key), + generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey), blob_hash, opts[:api_token], timestamp, blob_signature_ttl) if my_signature != given_signature diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb index 590228b1af..e0f6539699 100644 --- a/services/api/app/models/collection.rb +++ b/services/api/app/models/collection.rb @@ -125,7 +125,7 @@ class Collection < ArvadosModel # Signature provided, but verify_signature did not like it. logger.warn "Invalid signature on locator #{tok}" raise ArvadosModel::PermissionDeniedError - elsif Rails.configuration.permit_create_collection_with_unsigned_manifest + elsif !Rails.configuration.Collections.BlobSigning # No signature provided, but we are running in insecure mode. logger.debug "Missing signature on locator #{tok} ignored" elsif Blob.new(tok).empty? @@ -323,9 +323,9 @@ class Collection < ArvadosModel end def should_preserve_version? 
- return false unless (Rails.configuration.collection_versioning && versionable_updates?(self.changes.keys)) + return false unless (Rails.configuration.Collections.CollectionVersioning && versionable_updates?(self.changes.keys)) - idle_threshold = Rails.configuration.preserve_version_if_idle + idle_threshold = Rails.configuration.Collections.PreserveVersionIfIdle if !self.preserve_version_was && (idle_threshold < 0 || (idle_threshold > 0 && self.modified_at_was > db_current_time-idle_threshold.seconds)) @@ -371,7 +371,7 @@ class Collection < ArvadosModel return manifest_text else token = Thread.current[:token] - exp = [db_current_time.to_i + Rails.configuration.blob_signature_ttl, + exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL, trash_at].compact.map(&:to_i).min self.class.sign_manifest manifest_text, token, exp end @@ -379,7 +379,7 @@ class Collection < ArvadosModel def self.sign_manifest manifest, token, exp=nil if exp.nil? - exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl + exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL end signing_opts = { api_token: token, @@ -489,7 +489,7 @@ class Collection < ArvadosModel # # If filter_compatible_format is true (the default), only return image # collections which are support by the installation as indicated by - # Rails.configuration.docker_image_formats. Will follow + # Rails.configuration.Containers.SupportedDockerImageFormats. Will follow # 'docker_image_migration' links if search_term resolves to an incompatible # image, but an equivalent compatible image is available. def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil, filter_compatible_format: true) @@ -500,15 +500,17 @@ class Collection < ArvadosModel joins("JOIN collections ON links.head_uuid = collections.uuid"). order("links.created_at DESC") - if (Rails.configuration.docker_image_formats.include? 'v1' and - Rails.configuration.docker_image_formats.include? 
'v2') or filter_compatible_format == false + docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats + + if (docker_image_formats.include? 'v1' and + docker_image_formats.include? 'v2') or filter_compatible_format == false pattern = /^(sha256:)?[0-9A-Fa-f]{64}\.tar$/ - elsif Rails.configuration.docker_image_formats.include? 'v2' + elsif docker_image_formats.include? 'v2' pattern = /^(sha256:)[0-9A-Fa-f]{64}\.tar$/ - elsif Rails.configuration.docker_image_formats.include? 'v1' + elsif docker_image_formats.include? 'v1' pattern = /^[0-9A-Fa-f]{64}\.tar$/ else - raise "Unrecognized configuration for docker_image_formats #{Rails.configuration.docker_image_formats}" + raise "Unrecognized configuration for docker_image_formats #{docker_image_formats}" end # If the search term is a Collection locator that contains one file @@ -516,7 +518,9 @@ class Collection < ArvadosModel if loc = Keep::Locator.parse(search_term) loc.strip_hints! coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1) - if coll_match.any? or Rails.configuration.remote_hosts.length == 0 + rc = Rails.configuration.RemoteClusters.select{ |k| + k != :"*" && k != Rails.configuration.ClusterID} + if coll_match.any? 
or rc.length == 0 return get_compatible_images(readers, pattern, coll_match) else # Allow bare pdh that doesn't exist in the local database so diff --git a/services/api/app/models/commit.rb b/services/api/app/models/commit.rb index 921c690cd0..a3cef64212 100644 --- a/services/api/app/models/commit.rb +++ b/services/api/app/models/commit.rb @@ -148,7 +148,7 @@ class Commit < ActiveRecord::Base unless src_gitdir raise ArgumentError.new "no local repository for #{repo_name}" end - dst_gitdir = Rails.configuration.git_internal_dir + dst_gitdir = Rails.configuration.Containers.JobsAPI.GitInternalDir begin commit_in_dst = must_git(dst_gitdir, "log -n1 --format=%H #{sha1.shellescape}^{commit}").strip diff --git a/services/api/app/models/commit_ancestor.rb b/services/api/app/models/commit_ancestor.rb index 3d5152c3ff..59e8552f32 100644 --- a/services/api/app/models/commit_ancestor.rb +++ b/services/api/app/models/commit_ancestor.rb @@ -17,7 +17,7 @@ class CommitAncestor < ActiveRecord::Base protected def ask_git_whether_is - @gitdirbase = Rails.configuration.git_repositories_dir + @gitdirbase = Rails.configuration.Git.Repositories self.is = nil Dir.foreach @gitdirbase do |repo| next if repo.match(/^\./) diff --git a/services/api/app/models/container.rb b/services/api/app/models/container.rb index fb900a993d..45cd13bbcd 100644 --- a/services/api/app/models/container.rb +++ b/services/api/app/models/container.rb @@ -89,7 +89,8 @@ class Container < ArvadosModel nil => [Queued], Queued => [Locked, Cancelled], Locked => [Queued, Running, Cancelled], - Running => [Complete, Cancelled] + Running => [Complete, Cancelled], + Complete => [Cancelled] } def self.limit_index_columns_read @@ -205,7 +206,7 @@ class Container < ArvadosModel rc = {} defaults = { 'keep_cache_ram' => - Rails.configuration.container_default_keep_cache_ram, + Rails.configuration.Containers.DefaultKeepCacheRAM, } defaults.merge(runtime_constraints).each do |k, v| if v.is_a? 
Array @@ -368,7 +369,7 @@ class Container < ArvadosModel transaction do reload(lock: 'FOR UPDATE') check_unlock_fail - if self.lock_count < Rails.configuration.max_container_dispatch_attempts + if self.lock_count < Rails.configuration.Containers.MaxDispatchAttempts update_attributes!(state: Queued) else update_attributes!(state: Cancelled, @@ -497,7 +498,7 @@ class Container < ArvadosModel return false end - if self.state == Running && + if self.state_was == Running && !current_api_client_authorization.nil? && (current_api_client_authorization.uuid == self.auth_uuid || current_api_client_authorization.token == self.runtime_token) @@ -505,6 +506,8 @@ class Container < ArvadosModel # change priority or log. permitted.push *final_attrs permitted = permitted - [:log, :priority] + elsif !current_user.andand.is_admin + raise PermissionDeniedError elsif self.locked_by_uuid && self.locked_by_uuid != current_api_client_authorization.andand.uuid # When locked, progress fields cannot be updated by the wrong # dispatcher, even though it has admin privileges. diff --git a/services/api/app/models/container_request.rb b/services/api/app/models/container_request.rb index 292decafbf..24882860eb 100644 --- a/services/api/app/models/container_request.rb +++ b/services/api/app/models/container_request.rb @@ -196,7 +196,7 @@ class ContainerRequest < ArvadosModel self.mounts ||= {} self.secret_mounts ||= {} self.cwd ||= "." - self.container_count_max ||= Rails.configuration.container_count_max + self.container_count_max ||= Rails.configuration.Containers.MaxComputeVMs self.scheduling_parameters ||= {} self.output_ttl ||= 0 self.priority ||= 0 @@ -252,7 +252,7 @@ class ContainerRequest < ArvadosModel if self.state == Committed # If preemptible instances (eg: AWS Spot Instances) are allowed, # ask them on child containers by default. - if Rails.configuration.preemptible_instances and !c.nil? and + if Rails.configuration.Containers.UsePreemptibleInstances and !c.nil? 
and self.scheduling_parameters['preemptible'].nil? self.scheduling_parameters['preemptible'] = true end @@ -322,7 +322,7 @@ class ContainerRequest < ArvadosModel scheduling_parameters['partitions'].size) errors.add :scheduling_parameters, "partitions must be an array of strings" end - if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible'] + if !Rails.configuration.Containers.UsePreemptibleInstances and scheduling_parameters['preemptible'] errors.add :scheduling_parameters, "preemptible instances are not allowed" end if scheduling_parameters.include? 'max_run_time' and diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb index 420386cdc2..4d63deb99c 100644 --- a/services/api/app/models/job.rb +++ b/services/api/app/models/job.rb @@ -287,7 +287,7 @@ class Job < ArvadosModel log_reuse_info { "job #{j.uuid} has nil output" } elsif j.log.nil? log_reuse_info { "job #{j.uuid} has nil log" } - elsif Rails.configuration.reuse_job_if_outputs_differ + elsif Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output) # Ignore: keep looking for an incomplete job or one whose # output is readable. @@ -491,9 +491,9 @@ class Job < ArvadosModel end def find_docker_image_locator - if runtime_constraints.is_a? Hash + if runtime_constraints.is_a? Hash and Rails.configuration.Containers.JobsAPI.DefaultDockerImage != "" runtime_constraints['docker_image'] ||= - Rails.configuration.default_docker_image_for_jobs + Rails.configuration.Containers.JobsAPI.DefaultDockerImage end resolve_runtime_constraint("docker_image", @@ -569,7 +569,7 @@ class Job < ArvadosModel def trigger_crunch_dispatch_if_cancelled if @need_crunch_dispatch_trigger - File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do + File.open(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger, 'wb') do # That's all, just create/touch a file for crunch-job to see. 
end end diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb index 148dffc230..044d83c287 100644 --- a/services/api/app/models/node.rb +++ b/services/api/app/models/node.rb @@ -39,7 +39,7 @@ class Node < ArvadosModel api_accessible :superuser, :extend => :user do |t| t.add :first_ping_at t.add :info - t.add lambda { |x| Rails.configuration.compute_node_nameservers }, :as => :nameservers + t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers }, :as => :nameservers end after_initialize do @@ -47,7 +47,7 @@ class Node < ArvadosModel end def domain - super || Rails.configuration.compute_node_domain + super || Rails.configuration.Containers.SLURM.Managed.ComputeNodeDomain end def api_job_uuid @@ -143,7 +143,7 @@ class Node < ArvadosModel protected def assign_hostname - if self.hostname.nil? and Rails.configuration.assign_node_hostname + if self.hostname.nil? and Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname self.hostname = self.class.hostname_for_slot(self.slot_number) end end @@ -159,7 +159,7 @@ class Node < ArvadosModel # query label: 'Node.available_slot_number', # [col_id, val] for $1 vars: - [[nil, Rails.configuration.max_compute_nodes]], + [[nil, Rails.configuration.Containers.MaxComputeVMs]], ).rows.first.andand.first end @@ -194,24 +194,25 @@ class Node < ArvadosModel template_vars = { hostname: hostname, - uuid_prefix: Rails.configuration.uuid_prefix, + uuid_prefix: Rails.configuration.ClusterID, ip_address: ip_address, ptr_domain: ptr_domain, } - if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template + if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and + !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty?) 
tmpfile = nil begin begin - template = IO.read(Rails.configuration.dns_server_conf_template) + template = IO.read(Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate) rescue IOError, SystemCallError => e - logger.error "Reading #{Rails.configuration.dns_server_conf_template}: #{e.message}" + logger.error "Reading #{Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate}: #{e.message}" raise end - hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf" + hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf" Tempfile.open(["#{hostname}-", ".conf.tmp"], - Rails.configuration.dns_server_conf_dir) do |f| + Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir) do |f| tmpfile = f.path f.puts template % template_vars end @@ -227,20 +228,21 @@ class Node < ArvadosModel end end - if Rails.configuration.dns_server_update_command - cmd = Rails.configuration.dns_server_update_command % template_vars + if !Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand.empty? + cmd = Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand % template_vars if not system cmd logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}" ok = false end end - if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_reload_command - restartfile = File.join(Rails.configuration.dns_server_conf_dir, 'restart.txt') + if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and + !Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand.to_s.empty?) 
+ restartfile = File.join(Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, 'restart.txt') begin File.open(restartfile, 'w') do |f| # Typically, this is used to trigger a dns server restart - f.puts Rails.configuration.dns_server_reload_command + f.puts Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand end rescue IOError, SystemCallError => e logger.error "Unable to write #{restartfile}: #{e.message}" @@ -252,7 +254,7 @@ class Node < ArvadosModel end def self.hostname_for_slot(slot_number) - config = Rails.configuration.assign_node_hostname + config = Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname return nil if !config @@ -261,10 +263,13 @@ class Node < ArvadosModel # At startup, make sure all DNS entries exist. Otherwise, slurmctld # will refuse to start. - if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template and Rails.configuration.assign_node_hostname - (0..Rails.configuration.max_compute_nodes-1).each do |slot_number| + if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and + !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty? and + !Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname.empty?) + + (0..Rails.configuration.Containers.MaxComputeVMs-1).each do |slot_number| hostname = hostname_for_slot(slot_number) - hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf" + hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf" if !File.exist? hostfile n = Node.where(:slot_number => slot_number).first if n.nil? or n.ip_address.nil? diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb index 48655156c4..5e0e39f9be 100644 --- a/services/api/app/models/repository.rb +++ b/services/api/app/models/repository.rb @@ -49,7 +49,7 @@ class Repository < ArvadosModel # prefers bare repositories over checkouts. 
[["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args| [:uuid, :name].each do |path_attr| - git_dir = File.join(Rails.configuration.git_repositories_dir, + git_dir = File.join(Rails.configuration.Git.Repositories, repo_base % send(path_attr), *join_args) return git_dir if File.exist?(git_dir) end @@ -98,22 +98,27 @@ class Repository < ArvadosModel end def ssh_clone_url - _clone_url :git_repo_ssh_base, 'git@git.%s.arvadosapi.com:' + _clone_url Rails.configuration.Services.GitSSH.andand.ExternalURL, 'ssh://git@git.%s.arvadosapi.com' end def https_clone_url - _clone_url :git_repo_https_base, 'https://git.%s.arvadosapi.com/' + _clone_url Rails.configuration.Services.GitHTTP.andand.ExternalURL, 'https://git.%s.arvadosapi.com/' end def _clone_url config_var, default_base_fmt - configured_base = Rails.configuration.send config_var - return nil if configured_base == false - prefix = new_record? ? Rails.configuration.uuid_prefix : uuid[0,5] - if prefix == Rails.configuration.uuid_prefix and configured_base != true - base = configured_base + if not config_var + return "" + end + prefix = new_record? ? 
Rails.configuration.ClusterID : uuid[0,5] + if prefix == Rails.configuration.ClusterID and config_var != URI("") + base = config_var + else + base = URI(default_base_fmt % prefix) + end + if base.scheme == "ssh" + '%s@%s:%s.git' % [base.user, base.host, name] else - base = default_base_fmt % prefix + '%s%s.git' % [base, name] end - '%s%s.git' % [base, name] end end diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb index de85cc5a8e..989a975924 100644 --- a/services/api/app/models/user.rb +++ b/services/api/app/models/user.rb @@ -34,7 +34,7 @@ class User < ArvadosModel after_create :add_system_group_permission_link after_create :invalidate_permissions_cache after_create :auto_setup_new_user, :if => Proc.new { |user| - Rails.configuration.auto_setup_new_users and + Rails.configuration.Users.AutoSetupNewUsers and (user.uuid != system_user_uuid) and (user.uuid != anonymous_user_uuid) } @@ -81,7 +81,7 @@ class User < ArvadosModel def is_invited !!(self.is_active || - Rails.configuration.new_users_are_active || + Rails.configuration.Users.NewUsersAreActive || self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first) end @@ -358,15 +358,15 @@ class User < ArvadosModel current_user.andand.is_admin or (self == current_user && self.redirect_to_user_uuid.nil? && - self.is_active == Rails.configuration.new_users_are_active) + self.is_active == Rails.configuration.Users.NewUsersAreActive) end def check_auto_admin return if self.uuid.end_with?('anonymouspublic') if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and - Rails.configuration.auto_admin_user and self.email == Rails.configuration.auto_admin_user) or + !Rails.configuration.Users.AutoAdminUserWithEmail.empty? 
and self.email == Rails.configuration.Users["AutoAdminUserWithEmail"]) or (User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and - Rails.configuration.auto_admin_first_user) + Rails.configuration.Users.AutoAdminFirstUser) self.is_admin = true self.is_active = true end @@ -381,7 +381,7 @@ class User < ArvadosModel quoted_name = self.class.connection.quote_string(basename) next_username = basename next_suffix = 1 - while Rails.configuration.auto_setup_name_blacklist.include?(next_username) + while Rails.configuration.Users.AutoSetupUsernameBlacklist.include?(next_username) next_suffix += 1 next_username = "%s%i" % [basename, next_suffix] end @@ -493,7 +493,7 @@ class User < ArvadosModel # create login permission for the given vm_uuid, if it does not already exist def create_vm_login_permission_link(vm_uuid, repo_name) # vm uuid is optional - return if !vm_uuid + return if vm_uuid == "" vm = VirtualMachine.where(uuid: vm_uuid).first if !vm @@ -563,10 +563,10 @@ class User < ArvadosModel def auto_setup_new_user setup(openid_prefix: Rails.configuration.default_openid_prefix) if username - create_vm_login_permission_link(Rails.configuration.auto_setup_new_users_with_vm_uuid, + create_vm_login_permission_link(Rails.configuration.Users.AutoSetupNewUsersWithVmUUID, username) repo_name = "#{username}/#{username}" - if Rails.configuration.auto_setup_new_users_with_repository and + if Rails.configuration.Users.AutoSetupNewUsersWithRepository and Repository.where(name: repo_name).first.nil? repo = Repository.create!(name: repo_name, owner_uuid: uuid) Link.create!(tail_uuid: uuid, head_uuid: repo.uuid, @@ -579,7 +579,7 @@ class User < ArvadosModel def send_profile_created_notification if self.prefs_changed? if self.prefs_was.andand.empty? 
|| !self.prefs_was.andand['profile'] - profile_notification_address = Rails.configuration.user_profile_notification_address + profile_notification_address = Rails.configuration.Users.UserProfileNotificationAddress ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address end end diff --git a/services/api/app/views/admin_notifier/new_inactive_user.text.erb b/services/api/app/views/admin_notifier/new_inactive_user.text.erb index 097412c251..afcf34da71 100644 --- a/services/api/app/views/admin_notifier/new_inactive_user.text.erb +++ b/services/api/app/views/admin_notifier/new_inactive_user.text.erb @@ -7,10 +7,10 @@ A new user landed on the inactive user page: <%= @user.full_name %> <<%= @user.email %>> -<% if Rails.configuration.workbench_address -%> +<% if Rails.configuration.Services.Workbench1.ExternalURL -%> Please see workbench for more information: - <%= Rails.configuration.workbench_address %> + <%= Rails.configuration.Services.Workbench1.ExternalURL %> <% end -%> Thanks, diff --git a/services/api/app/views/admin_notifier/new_user.text.erb b/services/api/app/views/admin_notifier/new_user.text.erb index d21513f7f0..670b84b7c1 100644 --- a/services/api/app/views/admin_notifier/new_user.text.erb +++ b/services/api/app/views/admin_notifier/new_user.text.erb @@ -4,7 +4,7 @@ SPDX-License-Identifier: AGPL-3.0 %> <% add_to_message = '' - if Rails.configuration.auto_setup_new_users + if Rails.configuration.Users.AutoSetupNewUsers add_to_message = @user.is_invited ? ' and setup' : ', but not setup' end %> @@ -14,12 +14,11 @@ A new user has been created<%=add_to_message%>: This user is <%= @user.is_active ? '' : 'NOT ' %>active. 
-<% if Rails.configuration.workbench_address -%> +<% if Rails.configuration.Services.Workbench1.ExternalURL -%> Please see workbench for more information: - <%= Rails.configuration.workbench_address %> + <%= Rails.configuration.Services.Workbench1.ExternalURL %> <% end -%> Thanks, Your friendly Arvados robot. - diff --git a/services/api/app/views/user_notifier/account_is_setup.text.erb b/services/api/app/views/user_notifier/account_is_setup.text.erb index ca7082774d..50d164bfa1 100644 --- a/services/api/app/views/user_notifier/account_is_setup.text.erb +++ b/services/api/app/views/user_notifier/account_is_setup.text.erb @@ -8,9 +8,9 @@ SPDX-License-Identifier: AGPL-3.0 %> Hi there, <% end -%> -Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.workbench_address %>at +Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.Services.Workbench1.ExternalURL %>at - <%= Rails.configuration.workbench_address %><%= "/" if !Rails.configuration.workbench_address.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %> + <%= Rails.configuration.Services.Workbench1.ExternalURL %><%= "/" if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %> for connection instructions. diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml index 98443b428f..3ad3cac2b2 100644 --- a/services/api/config/application.default.yml +++ b/services/api/config/application.default.yml @@ -13,450 +13,6 @@ # 5. Section in application.default.yml called "common" common: - ### - ### Essential site configuration - ### - - # The prefix used for all database identifiers to identify the record as - # originating from this site. Must be exactly 5 alphanumeric characters - # (lowercase ASCII letters and digits). 
- uuid_prefix: ~ - - # secret_token is a string of alphanumeric characters used by Rails - # to sign session tokens. IMPORTANT: This is a site secret. It - # should be at least 50 characters. - secret_token: ~ - - # blob_signing_key is a string of alphanumeric characters used to - # generate permission signatures for Keep locators. It must be - # identical to the permission key given to Keep. IMPORTANT: This is - # a site secret. It should be at least 50 characters. - # - # Modifying blob_signing_key will invalidate all existing - # signatures, which can cause programs to fail (e.g., arv-put, - # arv-get, and Crunch jobs). To avoid errors, rotate keys only when - # no such processes are running. - blob_signing_key: ~ - - # These settings are provided by your OAuth2 provider (e.g., - # sso-provider). - sso_app_secret: ~ - sso_app_id: ~ - sso_provider_url: ~ - - # If this is not false, HTML requests at the API server's root URL - # are redirected to this location, and it is provided in the text of - # user activation notification email messages to remind them where - # to log in. - workbench_address: false - - # Client-facing URI for websocket service. Nginx should be - # configured to proxy this URI to arvados-ws; see - # http://doc.arvados.org/install/install-ws.html - # - # If websocket_address is false (which is the default), no websocket - # server will be advertised to clients. This configuration is not - # supported. - # - # Example: - #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket - websocket_address: false - - # Maximum number of websocket connections allowed - websocket_max_connections: 500 - - # Maximum number of events a single connection can be backlogged - websocket_max_notify_backlog: 1000 - - # Maximum number of subscriptions a single websocket connection can have - # active. - websocket_max_filters: 10 - - # Git repositories must be readable by api server, or you won't be - # able to submit crunch jobs. 
To pass the test suites, put a clone - # of the arvados tree in {git_repositories_dir}/arvados.git or - # {git_repositories_dir}/arvados/.git - git_repositories_dir: /var/lib/arvados/git/repositories - - # This is a (bare) repository that stores commits used in jobs. When a job - # runs, the source commits are first fetched into this repository, then this - # repository is used to deploy to compute nodes. This should NOT be a - # subdirectory of {git_repositiories_dir}. - git_internal_dir: /var/lib/arvados/internal.git - - # Default replication level for collections. This is used when a - # collection's replication_desired attribute is nil. - default_collection_replication: 2 - - - ### - ### Overriding default advertised hostnames/URLs - ### - - # If not false, this is the hostname, port, and protocol that will be used - # for root_url and advertised in the discovery document. By default, use - # the default Rails logic for deciding on a hostname. - host: false - port: false - protocol: false - - # Base part of SSH git clone url given with repository resources. If - # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is - # used. If false, SSH clone URLs are not advertised. Include a - # trailing ":" or "/" if needed: it will not be added automatically. - git_repo_ssh_base: true - - # Base part of HTTPS git clone urls given with repository - # resources. This is expected to be an arv-git-httpd service which - # accepts API tokens as HTTP-auth passwords. If true, the default - # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false, - # HTTPS clone URLs are not advertised. Include a trailing ":" or "/" - # if needed: it will not be added automatically. - git_repo_https_base: true - - - ### - ### New user and & email settings - ### - - # Config parameters to automatically setup new users. If enabled, - # this users will be able to self-activate. 
Enable this if you want - # to run an open instance where anyone can create an account and use - # the system without requiring manual approval. - # - # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on. - # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup. - auto_setup_new_users: false - auto_setup_new_users_with_vm_uuid: false - auto_setup_new_users_with_repository: false - auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog] - - # When new_users_are_active is set to true, new users will be active - # immediately. This skips the "self-activate" step which enforces - # user agreements. Should only be enabled for development. - new_users_are_active: false - - # The e-mail address of the user you would like to become marked as an admin - # user on their first login. - # In the default configuration, authentication happens through the Arvados SSO - # server, which uses OAuth2 against Google's servers, so in that case this - # should be an address associated with a Google account. - auto_admin_user: false - - # If auto_admin_first_user is set to true, the first user to log in when no - # other admin users exist will automatically become an admin user. - auto_admin_first_user: false - - # Email address to notify whenever a user creates a profile for the - # first time - user_profile_notification_address: false - - admin_notifier_email_from: arvados@example.com - email_subject_prefix: "[ARVADOS] " - user_notifier_email_from: arvados@example.com - new_user_notification_recipients: [ ] - new_inactive_user_notification_recipients: [ ] - - - ### - ### Limits, timeouts and durations - ### - - # Lifetime (in seconds) of blob permission signatures generated by - # the API server. This determines how long a client can take (after - # retrieving a collection record) to retrieve the collection data - # from Keep. 
If the client needs more time than that (assuming the - # collection still has the same content and the relevant user/token - # still has permission) the client can retrieve the collection again - # to get fresh signatures. - # - # This must be exactly equal to the -blob-signature-ttl flag used by - # keepstore servers. Otherwise, reading data blocks and saving - # collections will fail with HTTP 403 permission errors. - # - # Modifying blob_signature_ttl invalidates existing signatures; see - # blob_signing_key note above. - # - # The default is 2 weeks. - blob_signature_ttl: 1209600 - - # Default lifetime for ephemeral collections: 2 weeks. This must not - # be less than blob_signature_ttl. - default_trash_lifetime: 1209600 - - # Interval (seconds) between trash sweeps. During a trash sweep, - # collections are marked as trash if their trash_at time has - # arrived, and deleted if their delete_at time has arrived. - trash_sweep_interval: 60 - - # Interval (seconds) between asynchronous permission view updates. Any - # permission-updating API called with the 'async' parameter schedules a an - # update on the permission view in the future, if not already scheduled. - async_permissions_update_interval: 20 - - # Maximum characters of (JSON-encoded) query parameters to include - # in each request log entry. When params exceed this size, they will - # be JSON-encoded, truncated to this size, and logged as - # params_truncated. - max_request_log_params_size: 2000 - - # Maximum size (in bytes) allowed for a single API request. This - # limit is published in the discovery document for use by clients. - # Note: You must separately configure the upstream web server or - # proxy to actually enforce the desired maximum request size on the - # server side. - max_request_size: 134217728 - - # Limit the number of bytes read from the database during an index - # request (by retrieving and returning fewer rows than would - # normally be returned in a single response). 
- # Note 1: This setting never reduces the number of returned rows to - # zero, no matter how big the first data row is. - # Note 2: Currently, this is only checked against a specific set of - # columns that tend to get large (collections.manifest_text, - # containers.mounts, workflows.definition). Other fields (e.g., - # "properties" hashes) are not counted against this limit. - max_index_database_read: 134217728 - - # Maximum number of items to return when responding to a APIs that - # can return partial result sets using limit and offset parameters - # (e.g., *.index, groups.contents). If a request specifies a "limit" - # parameter higher than this value, this value is used instead. - max_items_per_response: 1000 - - # When you run the db:delete_old_job_logs task, it will find jobs that - # have been finished for at least this many seconds, and delete their - # stderr logs from the logs table. - clean_job_log_rows_after: <%= 30.days %> - - # When you run the db:delete_old_container_logs task, it will find - # containers that have been finished for at least this many seconds, - # and delete their stdout, stderr, arv-mount, crunch-run, and - # crunchstat logs from the logs table. - clean_container_log_rows_after: <%= 30.days %> - - # Time to keep audit logs, in seconds. (An audit log is a row added - # to the "logs" table in the PostgreSQL database each time an - # Arvados object is created, modified, or deleted.) - # - # Currently, websocket event notifications rely on audit logs, so - # this should not be set lower than 600 (5 minutes). - max_audit_log_age: 1209600 - - # Maximum number of log rows to delete in a single SQL transaction. - # - # If max_audit_log_delete_batch is 0, log entries will never be - # deleted by Arvados. Cleanup can be done by an external process - # without affecting any Arvados system processes, as long as very - # recent (<5 minutes old) logs are not deleted. - # - # 100000 is a reasonable batch size for most sites. 
- max_audit_log_delete_batch: 0 - - # The maximum number of compute nodes that can be in use simultaneously - # If this limit is reduced, any existing nodes with slot number >= new limit - # will not be counted against the new limit. In other words, the new limit - # won't be strictly enforced until those nodes with higher slot numbers - # go down. - max_compute_nodes: 64 - - # These two settings control how frequently log events are flushed to the - # database. Log lines are buffered until either crunch_log_bytes_per_event - # has been reached or crunch_log_seconds_between_events has elapsed since - # the last flush. - crunch_log_bytes_per_event: 4096 - crunch_log_seconds_between_events: 1 - - # The sample period for throttling logs, in seconds. - crunch_log_throttle_period: 60 - - # Maximum number of bytes that job can log over crunch_log_throttle_period - # before being silenced until the end of the period. - crunch_log_throttle_bytes: 65536 - - # Maximum number of lines that job can log over crunch_log_throttle_period - # before being silenced until the end of the period. - crunch_log_throttle_lines: 1024 - - # Maximum bytes that may be logged by a single job. Log bytes that are - # silenced by throttling are not counted against this total. - crunch_limit_log_bytes_per_job: 67108864 - - crunch_log_partial_line_throttle_period: 5 - - # Container logs are written to Keep and saved in a collection, - # which is updated periodically while the container runs. This - # value sets the interval (given in seconds) between collection - # updates. - crunch_log_update_period: 1800 - - # The log collection is also updated when the specified amount of - # log data (given in bytes) is produced in less than one update - # period. - crunch_log_update_size: 33554432 - - # Attributes to suppress in events and audit logs. Notably, - # specifying ["manifest_text"] here typically makes the database - # smaller and faster. 
- # - # Warning: Using any non-empty value here can have undesirable side - # effects for any client or component that relies on event logs. - # Use at your own risk. - unlogged_attributes: [] - - # API methods to disable. Disabled methods are not listed in the - # discovery document, and respond 404 to all requests. - # Example: ["jobs.create", "pipeline_instances.create"] - disable_api_methods: [] - - # Enable the legacy Jobs API. - # auto -- (default) enable the Jobs API only if it has been used before - # (i.e., there are job records in the database) - # true -- enable the Jobs API despite lack of existing records. - # false -- disable the Jobs API despite presence of existing records. - enable_legacy_jobs_api: auto - - ### - ### Crunch, DNS & compute node management - ### - - # Preemptible instance support (e.g. AWS Spot Instances) - # When true, child containers will get created with the preemptible - # scheduling parameter parameter set. - preemptible_instances: false - - # Docker image to be used when none found in runtime_constraints of a job - default_docker_image_for_jobs: false - - # List of supported Docker Registry image formats that compute nodes - # are able to use. `arv keep docker` will error out if a user tries - # to store an image with an unsupported format. Use an empty array - # to skip the compatibility check (and display a warning message to - # that effect). - # - # Example for sites running docker < 1.10: ["v1"] - # Example for sites running docker >= 1.10: ["v2"] - # Example for disabling check: [] - docker_image_formats: ["v2"] - - # :none or :slurm_immediate - crunch_job_wrapper: :none - - # username, or false = do not set uid when running jobs. - crunch_job_user: crunch - - # The web service must be able to create/write this file, and - # crunch-job must be able to stat() it. - crunch_refresh_trigger: /tmp/crunch_refresh_trigger - - # Path to dns server configuration directory - # (e.g. /etc/unbound.d/conf.d). 
If false, do not write any config - # files or touch restart.txt (see below). - dns_server_conf_dir: false - - # Template file for the dns server host snippets. See - # unbound.template in this directory for an example. If false, do - # not write any config files. - dns_server_conf_template: false - - # String to write to {dns_server_conf_dir}/restart.txt (with a - # trailing newline) after updating local data. If false, do not - # open or write the restart.txt file. - dns_server_reload_command: false - - # Command to run after each DNS update. Template variables will be - # substituted; see the "unbound" example below. If false, do not run - # a command. - dns_server_update_command: false - - ## Example for unbound: - #dns_server_conf_dir: /etc/unbound/conf.d - #dns_server_conf_template: /path/to/your/api/server/config/unbound.template - ## ...plus one of the following two methods of reloading: - #dns_server_reload_command: unbound-control reload - #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com - - compute_node_domain: false - compute_node_nameservers: - - 192.168.1.1 - - # Hostname to assign to a compute node when it sends a "ping" and the - # hostname in its Node record is nil. - # During bootstrapping, the "ping" script is expected to notice the - # hostname given in the ping response, and update its unix hostname - # accordingly. - # If false, leave the hostname alone (this is appropriate if your compute - # nodes' hostnames are already assigned by some other mechanism). 
- # - # One way or another, the hostnames of your node records should agree - # with your DNS records and your /etc/slurm-llnl/slurm.conf files. - # - # Example for compute0000, compute0001, ....: - # assign_node_hostname: compute%04d - # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.) - assign_node_hostname: compute%d - - - ### - ### Job and container reuse logic. - ### - - # Include details about job reuse decisions in the server log. This - # causes additional database queries to run, so it should not be - # enabled unless you expect to examine the resulting logs for - # troubleshooting purposes. - log_reuse_decisions: false - - # Control job reuse behavior when two completed jobs match the - # search criteria and have different outputs. - # - # If true, in case of a conflict, reuse the earliest job (this is - # similar to container reuse behavior). - # - # If false, in case of a conflict, do not reuse any completed job, - # but do reuse an already-running job if available (this is the - # original job reuse behavior, and is still the default). - reuse_job_if_outputs_differ: false - - ### - ### Federation support. - ### - - # You can enable use of this cluster by users who are authenticated - # by a remote Arvados site. Control which remote hosts are trusted - # to authenticate which user IDs by configuring remote_hosts, - # remote_hosts_via_dns, or both. The default configuration disables - # remote authentication. - - # Map known prefixes to hosts. For example, if user IDs beginning - # with "zzzzz-" should be authenticated by the Arvados server at - # "zzzzz.example.com", use: - # - # remote_hosts: - # zzzzz: zzzzz.example.com - remote_hosts: {} - - # Use {prefix}.arvadosapi.com for any prefix not given in - # remote_hosts above. - remote_hosts_via_dns: false - - # List of cluster prefixes. These are "trusted" clusters, users - # from the clusters listed here will be automatically setup and - # activated. 
This is separate from the settings - # auto_setup_new_users and new_users_are_active. - auto_activate_users_from: [] - - ### - ### Remaining assorted configuration options. - ### - - arvados_theme: default - - # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the - # Single Sign On (sso) server and remote Arvados sites. Should only - # be enabled during development when the SSO server is using a - # self-signed cert. - sso_insecure: false ## Set Time.zone default to the specified zone and make Active ## Record auto-convert to this zone. Run "rake -D time" for a list @@ -472,17 +28,6 @@ common: # Version of your assets, change this if you want to expire all your assets assets.version: "1.0" - # Allow clients to create collections by providing a manifest with - # unsigned data blob locators. IMPORTANT: This effectively disables - # access controls for data stored in Keep: a client who knows a hash - # can write a manifest that references the hash, pass it to - # collections.create (which will create a permission link), use - # collections.get to obtain a signature for that data locator, and - # use that signed locator to retrieve the data from Keep. Therefore, - # do not turn this on if your users expect to keep data private from - # one another! - permit_create_collection_with_unsigned_manifest: false - default_openid_prefix: https://www.google.com/accounts/o8/id # Override the automatic version string. With the default value of @@ -496,42 +41,6 @@ common: # (included in vendor packages). package_version: false - # Default value for container_count_max for container requests. This is the - # number of times Arvados will create a new container to satisfy a container - # request. If a container is cancelled it will retry a new container if - # container_count < container_count_max on any container requests associated - # with the cancelled container. - container_count_max: 3 - - # Default value for keep_cache_ram of a container's runtime_constraints. 
- container_default_keep_cache_ram: 268435456 - - # Token to be included in all healthcheck requests. Disabled by default. - # Server expects request header of the format "Authorization: Bearer xxx" - ManagementToken: false - - # URL of keep-web service. Provides read/write access to collections via - # HTTP and WebDAV protocols. - # - # Example: - # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/ - keep_web_service_url: false - - # If true, enable collection versioning. - # When a collection's preserve_version field is true or the current version - # is older than the amount of seconds defined on preserve_version_if_idle, - # a snapshot of the collection's previous state is created and linked to - # the current collection. - collection_versioning: false - # 0 = auto-create a new version on every update. - # -1 = never auto-create new versions. - # > 0 = auto-create a new version when older than the specified number of seconds. - preserve_version_if_idle: -1 - - # Number of times a container can be unlocked before being - # automatically cancelled. - max_container_dispatch_attempts: 5 - development: force_ssl: false cache_classes: false @@ -560,10 +69,6 @@ production: test: force_ssl: false cache_classes: true - public_file_server: - enabled: true - headers: - 'Cache-Control': public, max-age=3600 whiny_nils: true consider_all_requests_local: true action_controller.perform_caching: false diff --git a/services/api/config/application.rb b/services/api/config/application.rb index d6fcc9ea09..9a4270ad9d 100644 --- a/services/api/config/application.rb +++ b/services/api/config/application.rb @@ -45,6 +45,8 @@ module Server # The following is to avoid SafeYAML's warning message SafeYAML::OPTIONS[:default_mode] = :safe + require_relative "arvados_config.rb" + # Settings in config/environments/* take precedence over those specified here. 
# Application configuration should go into files in config/initializers # -- all .rb files in that directory are automatically loaded. diff --git a/services/api/config/arvados_config.rb b/services/api/config/arvados_config.rb new file mode 100644 index 0000000000..717ddad9e1 --- /dev/null +++ b/services/api/config/arvados_config.rb @@ -0,0 +1,260 @@ +# Copyright (C) The Arvados Authors. All rights reserved. +# +# SPDX-License-Identifier: AGPL-3.0 + +# +# Load Arvados configuration from /etc/arvados/config.yml, using defaults +# from config.default.yml +# +# Existing application.yml is migrated into the new config structure. +# Keys in the legacy application.yml take precedence. +# +# Use "bundle exec config:dump" to get the complete active configuration +# +# Use "bundle exec config:migrate" to migrate application.yml and +# database.yml to config.yml. After adding the output of +# config:migrate to /etc/arvados/config.yml, you will be able to +# delete application.yml and database.yml. + +require 'config_loader' + +begin + # If secret_token.rb exists here, we need to load it first. + require_relative 'secret_token.rb' +rescue LoadError + # Normally secret_token.rb is missing and the secret token is + # configured by application.yml (i.e., here!) instead. +end + +if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and + not defined? WARNED_OMNIAUTH_CONFIG) + Rails.logger.warn <<-EOS +DEPRECATED CONFIGURATION: + Please move your SSO provider config into config/application.yml + and delete config/initializers/omniauth.rb. +EOS + # Real values will be copied from globals by omniauth_init.rb. For + # now, assign some strings so the generic *.yml config loader + # doesn't overwrite them or complain that they're missing. 
+ Rails.configuration.Login["ProviderAppID"] = 'xxx' + Rails.configuration.Login["ProviderAppSecret"] = 'xxx' + Rails.configuration.Services["SSO"]["ExternalURL"] = '//xxx' + WARNED_OMNIAUTH_CONFIG = true +end + +# Load the defaults +$arvados_config_defaults = ConfigLoader.load "#{::Rails.root.to_s}/config/config.default.yml" +if $arvados_config_defaults.empty? + raise "Missing #{::Rails.root.to_s}/config/config.default.yml" +end + +clusterID, clusterConfig = $arvados_config_defaults["Clusters"].first +$arvados_config_defaults = clusterConfig +$arvados_config_defaults["ClusterID"] = clusterID + +# Initialize the global config with the defaults +$arvados_config_global = $arvados_config_defaults.deep_dup + +# Load the global config file +confs = ConfigLoader.load "/etc/arvados/config.yml" +if !confs.empty? + clusterID, clusterConfig = confs["Clusters"].first + $arvados_config_global["ClusterID"] = clusterID + + # Copy the cluster config over the defaults + $arvados_config_global.deep_merge!(clusterConfig) +end + +# Now make a copy +$arvados_config = $arvados_config_global.deep_dup + +# Declare all our configuration items. 
+arvcfg = ConfigLoader.new +arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix +arvcfg.declare_config "ManagementToken", String, :ManagementToken +arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir +arvcfg.declare_config "API.DisabledAPIs", Array, :disable_api_methods +arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size +arvcfg.declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read +arvcfg.declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response +arvcfg.declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval +arvcfg.declare_config "API.RailsSessionSecretToken", NonemptyString, :secret_token +arvcfg.declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users +arvcfg.declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid +arvcfg.declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository +arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist +arvcfg.declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active +arvcfg.declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user +arvcfg.declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user +arvcfg.declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address +arvcfg.declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from +arvcfg.declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix +arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from +arvcfg.declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients +arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients +arvcfg.declare_config 
"Login.ProviderAppSecret", NonemptyString, :sso_app_secret +arvcfg.declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id +arvcfg.declare_config "TLS.Insecure", Boolean, :sso_insecure +arvcfg.declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url +arvcfg.declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age +arvcfg.declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch +arvcfg.declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes +arvcfg.declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size +arvcfg.declare_config "Collections.DefaultReplication", Integer, :default_collection_replication +arvcfg.declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime +arvcfg.declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning +arvcfg.declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle +arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval +arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key +arvcfg.declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl +arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest +arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats +arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions +arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram +arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts +arvcfg.declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max +arvcfg.declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances 
+arvcfg.declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes +arvcfg.declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event +arvcfg.declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events +arvcfg.declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period +arvcfg.declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes +arvcfg.declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines +arvcfg.declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job +arvcfg.declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period +arvcfg.declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period +arvcfg.declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size +arvcfg.declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after +arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfDir", Pathname, :dns_server_conf_dir +arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname, :dns_server_conf_template +arvcfg.declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command +arvcfg.declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command +arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain +arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers +arvcfg.declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname +arvcfg.declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, 
->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s } +arvcfg.declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper +arvcfg.declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user +arvcfg.declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger +arvcfg.declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir +arvcfg.declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ +arvcfg.declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs +arvcfg.declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key +arvcfg.declare_config "Mail.MailchimpListID", String, :mailchimp_list_id +arvcfg.declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address +arvcfg.declare_config "Services.Websocket.ExternalURL", URI, :websocket_address +arvcfg.declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url +arvcfg.declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base +arvcfg.declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Services.GitSSH.ExternalURL", "ssh://#{v}" } +arvcfg.declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) { + h = {} + v.each do |clusterid, host| + h[clusterid] = { + "Host" => host, + "Proxy" => true, + "Scheme" => "https", + "Insecure" => false, + "ActivateUsers" => false + } + end + ConfigLoader.set_cfg cfg, "RemoteClusters", h +} +arvcfg.declare_config "RemoteClusters.*.Proxy", Boolean, :remote_hosts_via_dns + +dbcfg = ConfigLoader.new + +dbcfg.declare_config "PostgreSQL.ConnectionPool", Integer, :pool +dbcfg.declare_config "PostgreSQL.Connection.Host", String, :host +dbcfg.declare_config "PostgreSQL.Connection.Port", Integer, :port +dbcfg.declare_config "PostgreSQL.Connection.User", String, :username +dbcfg.declare_config 
"PostgreSQL.Connection.Password", String, :password +dbcfg.declare_config "PostgreSQL.Connection.DBName", String, :database +dbcfg.declare_config "PostgreSQL.Connection.Template", String, :template +dbcfg.declare_config "PostgreSQL.Connection.Encoding", String, :encoding + +application_config = {} +%w(application.default application).each do |cfgfile| + path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml" + confs = ConfigLoader.load(path, erb: true) + # Ignore empty YAML file: + next if confs == false + application_config.deep_merge!(confs['common'] || {}) + application_config.deep_merge!(confs[::Rails.env.to_s] || {}) +end + +db_config = {} +path = "#{::Rails.root.to_s}/config/database.yml" +if File.exist? path + db_config = ConfigLoader.load(path, erb: true) +end + +$remaining_config = arvcfg.migrate_config(application_config, $arvados_config) +dbcfg.migrate_config(db_config[::Rails.env.to_s] || {}, $arvados_config) + +if application_config[:auto_activate_users_from] + application_config[:auto_activate_users_from].each do |cluster| + if $arvados_config.RemoteClusters[cluster] + $arvados_config.RemoteClusters[cluster]["ActivateUsers"] = true + end + end +end + +# Checks for wrongly typed configuration items, coerces properties +# into correct types (such as Duration), and optionally raise error +# for essential configuration that can't be empty. 
+arvcfg.coercion_and_check $arvados_config_defaults, check_nonempty: false +arvcfg.coercion_and_check $arvados_config_global, check_nonempty: false +arvcfg.coercion_and_check $arvados_config, check_nonempty: true +dbcfg.coercion_and_check $arvados_config, check_nonempty: true + +# * $arvados_config_defaults is the defaults +# * $arvados_config_global is $arvados_config_defaults merged with the contents of /etc/arvados/config.yml +# These are used by the rake config: tasks +# +# * $arvados_config is $arvados_config_global merged with the migrated contents of application.yml +# This is what actually gets copied into the Rails configuration object. + +if $arvados_config["Collections"]["DefaultTrashLifetime"] < 86400.seconds then + raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.Collections.DefaultTrashLifetime +end + +# +# Special case for test database where there's no database.yml, +# because the Arvados config.yml doesn't have a concept of multiple +# rails environments. +# +if ::Rails.env.to_s == "test" && db_config["test"].nil? + $arvados_config["PostgreSQL"]["Connection"]["DBName"] = "arvados_test" +end + +if $arvados_config["PostgreSQL"]["Connection"]["Password"].empty? + raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}" +end + +dbhost = $arvados_config["PostgreSQL"]["Connection"]["Host"] +if $arvados_config["PostgreSQL"]["Connection"]["Port"] != 0 + dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["Port"]}" +end + +# +# If DATABASE_URL is set, then ActiveRecord won't error out if database.yml doesn't exist. 
+# +# For config migration, we've previously populated the PostgreSQL +# section of the config from database.yml +# +ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["User"]}:"+ + "#{$arvados_config["PostgreSQL"]["Connection"]["Password"]}@"+ + "#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["DBName"]}?"+ + "template=#{$arvados_config["PostgreSQL"]["Connection"]["Template"]}&"+ + "encoding=#{$arvados_config["PostgreSQL"]["Connection"]["client_encoding"]}&"+ + "pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}" + +Server::Application.configure do + # Copy into the Rails config object. This also turns Hash into + # OrderedOptions so that application code can use + # Rails.configuration.API.Blah instead of + # Rails.configuration.API["Blah"] + ConfigLoader.copy_into_config $arvados_config, config + ConfigLoader.copy_into_config $remaining_config, config + config.secret_key_base = config.secret_token +end diff --git a/services/api/config/config.default.yml b/services/api/config/config.default.yml new file mode 120000 index 0000000000..f039aa0149 --- /dev/null +++ b/services/api/config/config.default.yml @@ -0,0 +1 @@ +../../../lib/config/config.default.yml \ No newline at end of file diff --git a/services/api/config/initializers/legacy_jobs_api.rb b/services/api/config/initializers/legacy_jobs_api.rb index 9ea6b28843..8f3b3cb5f8 100644 --- a/services/api/config/initializers/legacy_jobs_api.rb +++ b/services/api/config/initializers/legacy_jobs_api.rb @@ -5,7 +5,6 @@ # Config must be done before we load model class files; otherwise they # won't be able to use Rails.configuration.* to initialize their # classes. 
-require_relative 'load_config.rb' require 'enable_jobs_api' diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb deleted file mode 100644 index 16059cad76..0000000000 --- a/services/api/config/initializers/load_config.rb +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) The Arvados Authors. All rights reserved. -# -# SPDX-License-Identifier: AGPL-3.0 - -begin - # If secret_token.rb exists here, we need to load it first. - require_relative 'secret_token.rb' -rescue LoadError - # Normally secret_token.rb is missing and the secret token is - # configured by application.yml (i.e., here!) instead. -end - -if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and - not defined? WARNED_OMNIAUTH_CONFIG) - Rails.logger.warn <<-EOS -DEPRECATED CONFIGURATION: - Please move your SSO provider config into config/application.yml - and delete config/initializers/omniauth.rb. -EOS - # Real values will be copied from globals by omniauth_init.rb. For - # now, assign some strings so the generic *.yml config loader - # doesn't overwrite them or complain that they're missing. - Rails.configuration.sso_app_id = 'xxx' - Rails.configuration.sso_app_secret = 'xxx' - Rails.configuration.sso_provider_url = '//xxx' - WARNED_OMNIAUTH_CONFIG = true -end - -$application_config = {} - -%w(application.default application).each do |cfgfile| - path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml" - if File.exist? path - yaml = ERB.new(IO.read path).result(binding) - confs = YAML.load(yaml, deserialize_symbols: true) - # Ignore empty YAML file: - next if confs == false - $application_config.merge!(confs['common'] || {}) - $application_config.merge!(confs[::Rails.env.to_s] || {}) - end -end - -Server::Application.configure do - nils = [] - $application_config.each do |k, v| - # "foo.bar: baz" --> { config.foo.bar = baz } - cfg = config - ks = k.split '.' 
- k = ks.pop - ks.each do |kk| - cfg = cfg.send(kk) - end - if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil? - # Config must have been set already in environments/*.rb. - # - # After config files have been migrated, this mechanism should - # be deprecated, then removed. - elsif v.nil? - # Config variables are not allowed to be nil. Make a "naughty" - # list, and present it below. - nils << k - else - cfg.send "#{k}=", v - end - end - if !nils.empty? - raise < Rails.configuration.max_request_log_params_size - payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]" + if params_s.length > Rails.configuration.SystemLogs["MaxRequestLogParamsSize"] + payload[:params_truncated] = params_s[0..Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]] + "[...]" else payload[:params] = params end diff --git a/services/api/config/initializers/omniauth_init.rb b/services/api/config/initializers/omniauth_init.rb index b5e98943df..5610999a94 100644 --- a/services/api/config/initializers/omniauth_init.rb +++ b/services/api/config/initializers/omniauth_init.rb @@ -9,15 +9,15 @@ if defined? CUSTOM_PROVIDER_URL Rails.logger.warn "Copying omniauth from globals in legacy config file." 
- Rails.configuration.sso_app_id = APP_ID - Rails.configuration.sso_app_secret = APP_SECRET - Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL + Rails.configuration.Login["ProviderAppID"] = APP_ID + Rails.configuration.Login["ProviderAppSecret"] = APP_SECRET + Rails.configuration.Services["SSO"]["ExternalURL"] = CUSTOM_PROVIDER_URL else Rails.application.config.middleware.use OmniAuth::Builder do provider(:josh_id, - Rails.configuration.sso_app_id, - Rails.configuration.sso_app_secret, - Rails.configuration.sso_provider_url) + Rails.configuration.Login["ProviderAppID"], + Rails.configuration.Login["ProviderAppSecret"], + Rails.configuration.Services["SSO"]["ExternalURL"]) end OmniAuth.config.on_failure = StaticController.action(:login_failure) end diff --git a/services/api/config/initializers/preload_all_models.rb b/services/api/config/initializers/preload_all_models.rb index 0ab2b032a4..713c61fd75 100644 --- a/services/api/config/initializers/preload_all_models.rb +++ b/services/api/config/initializers/preload_all_models.rb @@ -7,7 +7,6 @@ # Config must be done before we load model class files; otherwise they # won't be able to use Rails.configuration.* to initialize their # classes. 
-require_relative 'load_config.rb' if Rails.env == 'development' Dir.foreach("#{Rails.root}/app/models") do |model_file| diff --git a/services/api/db/structure.sql b/services/api/db/structure.sql index ebe06b3484..27affcd50f 100644 --- a/services/api/db/structure.sql +++ b/services/api/db/structure.sql @@ -3061,3 +3061,4 @@ INSERT INTO "schema_migrations" (version) VALUES ('20190214214814'), ('20190322174136'); + diff --git a/services/api/lib/audit_logs.rb b/services/api/lib/audit_logs.rb index 56fd935f3f..e97f65a973 100644 --- a/services/api/lib/audit_logs.rb +++ b/services/api/lib/audit_logs.rb @@ -44,8 +44,8 @@ module AuditLogs end def self.tidy_in_background - max_age = Rails.configuration.max_audit_log_age - max_batch = Rails.configuration.max_audit_log_delete_batch + max_age = Rails.configuration.AuditLogs.MaxAge + max_batch = Rails.configuration.AuditLogs.MaxDeleteBatch return if max_age <= 0 || max_batch <= 0 exp = (max_age/14).seconds diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb new file mode 100644 index 0000000000..90b6d9ddc7 --- /dev/null +++ b/services/api/lib/config_loader.rb @@ -0,0 +1,205 @@ +# Copyright (C) The Arvados Authors. All rights reserved. 
+# +# SPDX-License-Identifier: AGPL-3.0 + +module Psych + module Visitors + class YAMLTree < Psych::Visitors::Visitor + def visit_ActiveSupport_Duration o + seconds = o.to_i + outstr = "" + if seconds / 3600 > 0 + outstr += "#{seconds / 3600}h" + seconds = seconds % 3600 + end + if seconds / 60 > 0 + outstr += "#{seconds / 60}m" + seconds = seconds % 60 + end + if seconds > 0 + outstr += "#{seconds}s" + end + if outstr == "" + outstr = "0s" + end + @emitter.scalar outstr, nil, nil, true, false, Nodes::Scalar::ANY + end + + def visit_URI_Generic o + @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY + end + + def visit_URI_HTTP o + @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY + end + + def visit_Pathname o + @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY + end + end + end +end + + +module Boolean; end +class TrueClass; include Boolean; end +class FalseClass; include Boolean; end + +class NonemptyString < String +end + +class ConfigLoader + def initialize + @config_migrate_map = {} + @config_types = {} + end + + def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil) + if migrate_from + @config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) { + ConfigLoader.set_cfg cfg, assign_to, v + } + end + @config_types[assign_to] = configtype + end + + + def migrate_config from_config, to_config + remainders = {} + from_config.each do |k, v| + if @config_migrate_map[k.to_sym] + @config_migrate_map[k.to_sym].call to_config, k, v + else + remainders[k] = v + end + end + remainders + end + + def coercion_and_check check_cfg, check_nonempty: true + @config_types.each do |cfgkey, cfgtype| + cfg = check_cfg + k = cfgkey + ks = k.split '.' + k = ks.pop + ks.each do |kk| + cfg = cfg[kk] + if cfg.nil? + break + end + end + + if cfg.nil? + raise "missing #{cfgkey}" + end + + if cfgtype == String and !cfg[k] + cfg[k] = "" + end + + if cfgtype == String and cfg[k].is_a? 
Symbol + cfg[k] = cfg[k].to_s + end + + if cfgtype == Pathname and cfg[k].is_a? String + + if cfg[k] == "" + cfg[k] = Pathname.new("") + else + cfg[k] = Pathname.new(cfg[k]) + if !cfg[k].exist? + raise "#{cfgkey} path #{cfg[k]} does not exist" + end + end + end + + if cfgtype == NonemptyString + if (!cfg[k] || cfg[k] == "") && check_nonempty + raise "#{cfgkey} cannot be empty" + end + if cfg[k].is_a? String + next + end + end + + if cfgtype == ActiveSupport::Duration + if cfg[k].is_a? Integer + cfg[k] = cfg[k].seconds + elsif cfg[k].is_a? String + cfg[k] = ConfigLoader.parse_duration cfg[k] + end + end + + if cfgtype == URI + cfg[k] = URI(cfg[k]) + end + + if !cfg[k].is_a? cfgtype + raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}" + end + end + end + + def self.set_cfg cfg, k, v + # "foo.bar = baz" --> { cfg["foo"]["bar"] = baz } + ks = k.split '.' + k = ks.pop + ks.each do |kk| + cfg = cfg[kk] + if cfg.nil? + break + end + end + if !cfg.nil? + cfg[k] = v + end + end + + def self.parse_duration durstr + duration_re = /(\d+(\.\d+)?)(s|m|h)/ + dursec = 0 + while durstr != "" + mt = duration_re.match durstr + if !mt + raise "'#{durstr}' is not a valid duration, accepted suffixes are s, m, h" + end + multiplier = {s: 1, m: 60, h: 3600} + dursec += (Float(mt[1]) * multiplier[mt[3].to_sym]) + durstr = durstr[mt[0].length..-1] + end + return dursec.seconds + end + + def self.copy_into_config src, dst + src.each do |k, v| + dst.send "#{k}=", self.to_OrderedOptions(v) + end + end + + def self.to_OrderedOptions confs + if confs.is_a? Hash + opts = ActiveSupport::OrderedOptions.new + confs.each do |k,v| + opts[k] = self.to_OrderedOptions(v) + end + opts + elsif confs.is_a? Array + confs.map { |v| self.to_OrderedOptions v } + else + confs + end + end + + def self.load path, erb: false + if File.exist? 
path + yaml = IO.read path + if erb + yaml = ERB.new(yaml).result(binding) + end + YAML.load(yaml, deserialize_symbols: false) + else + {} + end + end + +end diff --git a/services/api/lib/crunch_dispatch.rb b/services/api/lib/crunch_dispatch.rb index 449d7d5162..4e640186d1 100644 --- a/services/api/lib/crunch_dispatch.rb +++ b/services/api/lib/crunch_dispatch.rb @@ -31,13 +31,13 @@ class CrunchDispatch @cgroup_root = ENV['CRUNCH_CGROUP_ROOT'] @srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT'] - @arvados_internal = Rails.configuration.git_internal_dir + @arvados_internal = Rails.configuration.Containers.JobsAPI.GitInternalDir if not File.exist? @arvados_internal $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}` raise "No internal git repository available" unless ($? == 0) end - @repo_root = Rails.configuration.git_repositories_dir + @repo_root = Rails.configuration.Git.Repositories @arvados_repo_path = Repository.where(name: "arvados").first.server_path @authorizations = {} @did_recently = {} @@ -110,7 +110,7 @@ class CrunchDispatch end def update_node_status - return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/) + return unless Rails.configuration.Containers.JobsAPI.CrunchJobWrapper.to_s.match(/^slurm/) slurm_status.each_pair do |hostname, slurmdata| next if @node_state[hostname] == slurmdata begin @@ -337,14 +337,14 @@ class CrunchDispatch next if @running[job.uuid] cmd_args = nil - case Server::Application.config.crunch_job_wrapper - when :none + case Rails.configuration.Containers.JobsAPI.CrunchJobWrapper + when "none" if @running.size > 0 # Don't run more than one at a time. return end cmd_args = [] - when :slurm_immediate + when "slurm_immediate" nodelist = nodes_available_for_job(job) if nodelist.nil? 
if Time.now < @node_wait_deadline @@ -361,7 +361,7 @@ class CrunchDispatch "--job-name=#{job.uuid}", "--nodelist=#{nodelist.join(',')}"] else - raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}" + raise "Unknown crunch_job_wrapper: #{Rails.configuration.Containers.JobsAPI.CrunchJobWrapper}" end cmd_args = sudo_preface + cmd_args @@ -460,7 +460,7 @@ class CrunchDispatch bytes_logged: 0, events_logged: 0, log_throttle_is_open: true, - log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period, + log_throttle_reset_time: Time.now + Rails.configuration.Containers.Logging.LogThrottlePeriod, log_throttle_bytes_so_far: 0, log_throttle_lines_so_far: 0, log_throttle_bytes_skipped: 0, @@ -485,7 +485,7 @@ class CrunchDispatch matches = line.match(/^\S+ \S+ \d+ \d+ stderr (.*)/) if matches and matches[1] and matches[1].start_with?('[...]') and matches[1].end_with?('[...]') partial_line = true - if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.crunch_log_partial_line_throttle_period + if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod running_job[:log_throttle_partial_line_last_at] = Time.now else skip_counts = true @@ -499,26 +499,26 @@ class CrunchDispatch end if (running_job[:bytes_logged] > - Rails.configuration.crunch_limit_log_bytes_per_job) - message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated." + Rails.configuration.Containers.Logging.LimitLogBytesPerJob) + message = "Exceeded log limit #{Rails.configuration.Containers.Logging.LimitLogBytesPerJob} bytes (LimitLogBytesPerJob). Log will be truncated." 
running_job[:log_throttle_reset_time] = Time.now + 100.years running_job[:log_throttle_is_open] = false elsif (running_job[:log_throttle_bytes_so_far] > - Rails.configuration.crunch_log_throttle_bytes) + Rails.configuration.Containers.Logging.LogThrottleBytes) remaining_time = running_job[:log_throttle_reset_time] - Time.now - message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds." + message = "Exceeded rate #{Rails.configuration.Containers.Logging.LogThrottleBytes} bytes per #{Rails.configuration.Containers.Logging.LogThrottlePeriod} seconds (LogThrottleBytes). Logging will be silenced for the next #{remaining_time.round} seconds." running_job[:log_throttle_is_open] = false elsif (running_job[:log_throttle_lines_so_far] > - Rails.configuration.crunch_log_throttle_lines) + Rails.configuration.Containers.Logging.LogThrottleLines) remaining_time = running_job[:log_throttle_reset_time] - Time.now - message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds." + message = "Exceeded rate #{Rails.configuration.Containers.Logging.LogThrottleLines} lines per #{Rails.configuration.Containers.Logging.LogThrottlePeriod} seconds (LogThrottleLines), logging will be silenced for the next #{remaining_time.round} seconds." running_job[:log_throttle_is_open] = false elsif partial_line and running_job[:log_throttle_first_partial_line] running_job[:log_throttle_first_partial_line] = false - message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.crunch_log_partial_line_throttle_period} seconds." 
+ message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod} seconds." end end @@ -552,7 +552,7 @@ class CrunchDispatch j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n" end - j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period + j[:log_throttle_reset_time] = now + Rails.configuration.Containers.Logging.LogThrottlePeriod j[:log_throttle_bytes_so_far] = 0 j[:log_throttle_lines_so_far] = 0 j[:log_throttle_bytes_skipped] = 0 @@ -592,7 +592,7 @@ class CrunchDispatch bufend = '' streambuf.each_line do |line| if not line.end_with? $/ - if line.size > Rails.configuration.crunch_log_throttle_bytes + if line.size > Rails.configuration.Containers.Logging.LogThrottleBytes # Without a limit here, we'll use 2x an arbitrary amount # of memory, and waste a lot of time copying strings # around, all without providing any feedback to anyone @@ -775,7 +775,7 @@ class CrunchDispatch # This is how crunch-job child procs know where the "refresh" # trigger file is - ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger + ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger # If salloc can't allocate resources immediately, make it use our # temporary failure exit code. This ensures crunch-dispatch won't @@ -902,9 +902,9 @@ class CrunchDispatch end def sudo_preface - return [] if not Server::Application.config.crunch_job_user + return [] if not Rails.configuration.Containers.JobsAPI.CrunchJobUser ["sudo", "-E", "-u", - Server::Application.config.crunch_job_user, + Rails.configuration.Containers.JobsAPI.CrunchJobUser, "LD_LIBRARY_PATH=#{ENV['LD_LIBRARY_PATH']}", "PATH=#{ENV['PATH']}", "PERLLIB=#{ENV['PERLLIB']}", @@ -937,8 +937,8 @@ class CrunchDispatch # Send out to log event if buffer size exceeds the bytes per event or if # it has been at least crunch_log_seconds_between_events seconds since # the last flush. 
- if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or - (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events + if running_job[:stderr_buf_to_flush].size > Rails.configuration.Containers.Logging.LogBytesPerEvent or + (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.Containers.Logging.LogSecondsBetweenEvents begin log = Log.new(object_uuid: running_job[:job].uuid, event_type: 'stderr', @@ -957,7 +957,7 @@ class CrunchDispatch # An array of job_uuids in squeue def squeue_jobs - if Rails.configuration.crunch_job_wrapper == :slurm_immediate + if Rails.configuration.Containers.JobsAPI.CrunchJobWrapper == "slurm_immediate" p = IO.popen(['squeue', '-a', '-h', '-o', '%j']) begin p.readlines.map {|line| line.strip} diff --git a/services/api/lib/current_api_client.rb b/services/api/lib/current_api_client.rb index 49638677b1..c7b48c0cdd 100644 --- a/services/api/lib/current_api_client.rb +++ b/services/api/lib/current_api_client.rb @@ -42,25 +42,25 @@ module CurrentApiClient end def system_user_uuid - [Server::Application.config.uuid_prefix, + [Rails.configuration.ClusterID, User.uuid_prefix, '000000000000000'].join('-') end def system_group_uuid - [Server::Application.config.uuid_prefix, + [Rails.configuration.ClusterID, Group.uuid_prefix, '000000000000000'].join('-') end def anonymous_group_uuid - [Server::Application.config.uuid_prefix, + [Rails.configuration.ClusterID, Group.uuid_prefix, 'anonymouspublic'].join('-') end def anonymous_user_uuid - [Server::Application.config.uuid_prefix, + [Rails.configuration.ClusterID, User.uuid_prefix, 'anonymouspublic'].join('-') end @@ -105,7 +105,7 @@ module CurrentApiClient end def all_users_group_uuid - [Server::Application.config.uuid_prefix, + [Rails.configuration.ClusterID, Group.uuid_prefix, 'fffffffffffffff'].join('-') end diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb index 
63543ab3ad..c909ae9227 100644 --- a/services/api/lib/enable_jobs_api.rb +++ b/services/api/lib/enable_jobs_api.rb @@ -31,9 +31,9 @@ Disable_jobs_api_method_list = ["jobs.create", "job_tasks.show"] def check_enable_legacy_jobs_api - if Rails.configuration.enable_legacy_jobs_api == false || - (Rails.configuration.enable_legacy_jobs_api == "auto" && + if Rails.configuration.Containers.JobsAPI.Enable == "false" || + (Rails.configuration.Containers.JobsAPI.Enable == "auto" && Job.count == 0) - Rails.configuration.disable_api_methods += Disable_jobs_api_method_list + Rails.configuration.API.DisabledAPIs += Disable_jobs_api_method_list end end diff --git a/services/api/lib/has_uuid.rb b/services/api/lib/has_uuid.rb index 60db53d5e6..2074566941 100644 --- a/services/api/lib/has_uuid.rb +++ b/services/api/lib/has_uuid.rb @@ -30,7 +30,7 @@ module HasUuid Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1] end def generate_uuid - [Server::Application.config.uuid_prefix, + [Rails.configuration.ClusterID, self.uuid_prefix, rand(2**256).to_s(36)[-15..-1]]. 
join '-' diff --git a/services/api/lib/josh_id.rb b/services/api/lib/josh_id.rb index bb6c1f48a8..f18c0edda0 100644 --- a/services/api/lib/josh_id.rb +++ b/services/api/lib/josh_id.rb @@ -40,7 +40,7 @@ module OmniAuth options.client_options[:site] = options[:custom_provider_url] options.client_options[:authorize_url] = "#{options[:custom_provider_url]}/auth/josh_id/authorize" options.client_options[:access_token_url] = "#{options[:custom_provider_url]}/auth/josh_id/access_token" - if Rails.configuration.sso_insecure + if Rails.configuration.TLS.Insecure options.client_options[:ssl] = {verify_mode: OpenSSL::SSL::VERIFY_NONE} end ::OAuth2::Client.new(options.client_id, options.client_secret, deep_symbolize(options.client_options)) diff --git a/services/api/lib/load_param.rb b/services/api/lib/load_param.rb index 736f270e96..7119eb2348 100644 --- a/services/api/lib/load_param.rb +++ b/services/api/lib/load_param.rb @@ -56,7 +56,7 @@ module LoadParam raise ArgumentError.new("Invalid value for limit parameter") end @limit = [params[:limit].to_i, - Rails.configuration.max_items_per_response].min + Rails.configuration.API.MaxItemsPerResponse].min else @limit = DEFAULT_LIMIT end diff --git a/services/api/lib/log_reuse_info.rb b/services/api/lib/log_reuse_info.rb index ed5cc82bfd..5c7efd7ded 100644 --- a/services/api/lib/log_reuse_info.rb +++ b/services/api/lib/log_reuse_info.rb @@ -9,7 +9,7 @@ module LogReuseInfo # doing expensive things like database queries, and we want to skip # those when logging is disabled. def log_reuse_info(candidates=nil) - if Rails.configuration.log_reuse_decisions + if Rails.configuration.Containers.LogReuseDecisions msg = yield if !candidates.nil? 
msg = "have #{candidates.count} candidates " + msg diff --git a/services/api/lib/refresh_permission_view.rb b/services/api/lib/refresh_permission_view.rb index 25be3c08d4..5d6081f262 100644 --- a/services/api/lib/refresh_permission_view.rb +++ b/services/api/lib/refresh_permission_view.rb @@ -12,8 +12,8 @@ def do_refresh_permission_view end def refresh_permission_view(async=false) - if async and Rails.configuration.async_permissions_update_interval > 0 - exp = Rails.configuration.async_permissions_update_interval.seconds + if async and Rails.configuration.API.AsyncPermissionsUpdateInterval > 0 + exp = Rails.configuration.API.AsyncPermissionsUpdateInterval.seconds need = false Rails.cache.fetch('AsyncRefreshPermissionView', expires_in: exp) do need = true diff --git a/services/api/lib/sweep_trashed_objects.rb b/services/api/lib/sweep_trashed_objects.rb index bedbd68a44..8613c749cf 100644 --- a/services/api/lib/sweep_trashed_objects.rb +++ b/services/api/lib/sweep_trashed_objects.rb @@ -55,8 +55,8 @@ module SweepTrashedObjects end def self.sweep_if_stale - return if Rails.configuration.trash_sweep_interval <= 0 - exp = Rails.configuration.trash_sweep_interval.seconds + return if Rails.configuration.Collections.TrashSweepInterval <= 0 + exp = Rails.configuration.Collections.TrashSweepInterval.seconds need = false Rails.cache.fetch('SweepTrashedObjects', expires_in: exp) do need = true diff --git a/services/api/lib/tasks/config.rake b/services/api/lib/tasks/config.rake new file mode 100644 index 0000000000..e7d6ab4566 --- /dev/null +++ b/services/api/lib/tasks/config.rake @@ -0,0 +1,51 @@ +# Copyright (C) The Arvados Authors. All rights reserved. +# +# SPDX-License-Identifier: AGPL-3.0 + +def diff_hash base, final + diffed = {} + base.each do |k,v| + bk = base[k] + fk = final[k] + if bk.is_a? 
Hash + d = diff_hash bk, fk + if d.length > 0 + diffed[k] = d + end + else + if bk.to_yaml != fk.to_yaml + diffed[k] = fk + end + end + end + diffed +end + +namespace :config do + desc 'Print items that differ between legacy application.yml and system config.yml' + task diff: :environment do + diffed = diff_hash $arvados_config_global, $arvados_config + cfg = { "Clusters" => {}} + cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"} + if cfg["Clusters"][$arvados_config["ClusterID"]].empty? + puts "No migrations required for /etc/arvados/config.yml" + else + puts cfg.to_yaml + end + end + + desc 'Print config.yml after merging with legacy application.yml' + task migrate: :environment do + diffed = diff_hash $arvados_config_defaults, $arvados_config + cfg = { "Clusters" => {}} + cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"} + puts cfg.to_yaml + end + + desc 'Print configuration as accessed through Rails.configuration' + task dump: :environment do + combined = $arvados_config.deep_dup + combined.update $remaining_config + puts combined.to_yaml + end +end diff --git a/services/api/lib/tasks/config_check.rake b/services/api/lib/tasks/config_check.rake deleted file mode 100644 index 4f071f11a3..0000000000 --- a/services/api/lib/tasks/config_check.rake +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) The Arvados Authors. All rights reserved. 
-# -# SPDX-License-Identifier: AGPL-3.0 - -namespace :config do - desc 'Ensure site configuration has all required settings' - task check: :environment do - $stderr.puts "%-32s %s" % ["AppVersion (discovered)", AppVersion.hash] - $application_config.sort.each do |k, v| - if ENV.has_key?('QUIET') then - # Make sure we still check for the variable to exist - eval("Rails.configuration.#{k}") - else - if /(password|secret|signing_key)/.match(k) then - # Make sure we still check for the variable to exist, but don't print the value - eval("Rails.configuration.#{k}") - $stderr.puts "%-32s %s" % [k, '*********'] - else - $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")] - end - end - end - # default_trash_lifetime cannot be less than 24 hours - if Rails.configuration.default_trash_lifetime < 86400 then - raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime - end - end -end diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake deleted file mode 100644 index ed349600b8..0000000000 --- a/services/api/lib/tasks/config_dump.rake +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) The Arvados Authors. All rights reserved. 
-# -# SPDX-License-Identifier: AGPL-3.0 - -namespace :config do - desc 'Show site configuration' - task dump: :environment do - puts $application_config.to_yaml - end -end diff --git a/services/api/lib/tasks/delete_old_container_logs.rake b/services/api/lib/tasks/delete_old_container_logs.rake index b45113e8a5..c5c5cdc769 100644 --- a/services/api/lib/tasks/delete_old_container_logs.rake +++ b/services/api/lib/tasks/delete_old_container_logs.rake @@ -11,7 +11,7 @@ namespace :db do desc "Remove old container log entries from the logs table" task delete_old_container_logs: :environment do - delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.clean_container_log_rows_after} seconds')" + delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')" ActiveRecord::Base.connection.execute(delete_sql) end diff --git a/services/api/lib/tasks/delete_old_job_logs.rake b/services/api/lib/tasks/delete_old_job_logs.rake index dcd92b19bc..3c1c049998 100644 --- a/services/api/lib/tasks/delete_old_job_logs.rake +++ b/services/api/lib/tasks/delete_old_job_logs.rake @@ -9,7 +9,7 @@ namespace :db do desc "Remove old job stderr entries from the logs table" task delete_old_job_logs: :environment do - delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval 
'#{Rails.configuration.clean_job_log_rows_after} seconds')" + delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')" ActiveRecord::Base.connection.execute(delete_sql) end diff --git a/services/api/lib/trashable.rb b/services/api/lib/trashable.rb index 9687962964..f7faabc4c2 100644 --- a/services/api/lib/trashable.rb +++ b/services/api/lib/trashable.rb @@ -50,7 +50,7 @@ module Trashable if trash_at.nil? self.delete_at = nil else - self.delete_at = trash_at + Rails.configuration.default_trash_lifetime.seconds + self.delete_at = trash_at + Rails.configuration.Collections.DefaultTrashLifetime.seconds end elsif !trash_at || !delete_at || trash_at > delete_at # Not trash, or bogus arguments? Just validate in @@ -65,7 +65,7 @@ module Trashable earliest_delete = [ @validation_timestamp, trash_at_was, - ].compact.min + Rails.configuration.blob_signature_ttl.seconds + ].compact.min + Rails.configuration.Collections.BlobSigningTTL.seconds # The previous value of delete_at is also an upper bound on the # longest-lived permission token. 
For example, if TTL=14, @@ -96,7 +96,7 @@ module TrashableController @object.update_attributes!(trash_at: db_current_time) end earliest_delete = (@object.trash_at + - Rails.configuration.blob_signature_ttl.seconds) + Rails.configuration.Collections.BlobSigningTTL.seconds) if @object.delete_at > earliest_delete @object.update_attributes!(delete_at: earliest_delete) end diff --git a/services/api/test/functional/arvados/v1/collections_controller_test.rb b/services/api/test/functional/arvados/v1/collections_controller_test.rb index ee2b016cd5..cc545b2fd1 100644 --- a/services/api/test/functional/arvados/v1/collections_controller_test.rb +++ b/services/api/test/functional/arvados/v1/collections_controller_test.rb @@ -11,7 +11,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase def permit_unsigned_manifests isok=true # Set security model for the life of a test. - Rails.configuration.permit_create_collection_with_unsigned_manifest = isok + Rails.configuration.Collections.BlobSigning = !isok end def assert_signed_manifest manifest_text, label='', token: false @@ -24,7 +24,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase exp = tok[/\+A[[:xdigit:]]+@([[:xdigit:]]+)/, 1].to_i(16) sig = Blob.sign_locator( bare, - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, expire: exp, api_token: token)[/\+A[^\+]*/, 0] assert_includes tok, sig @@ -88,7 +88,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase token = api_client_authorizations(:active).send(token_method) signed = Blob.sign_locator( 'acbd18db4cc2f85cedef654fccc4a4d8+3', - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: token) authorize_with_token token put :update, params: { @@ -221,7 +221,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase def request_capped_index(params={}) authorize_with :user1_with_load 
coll1 = collections(:collection_1_of_201) - Rails.configuration.max_index_database_read = + Rails.configuration.API.MaxIndexDatabaseRead = yield(coll1.manifest_text.size) get :index, params: { select: %w(uuid manifest_text), @@ -566,7 +566,7 @@ EOS # Build a manifest with both signed and unsigned locators. signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), } signed_locators = locators.collect do |x| @@ -622,7 +622,7 @@ EOS # TODO(twp): in phase 4, all locators will need to be signed, so # this test should break and will need to be rewritten. Issue #2755. signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), ttl: 3600 # 1 hour } @@ -653,7 +653,7 @@ EOS test "create fails with invalid signature" do authorize_with :active signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), } @@ -683,7 +683,7 @@ EOS test "create fails with uuid of signed manifest" do authorize_with :active signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), } @@ -755,7 +755,7 @@ EOS ea10d51bcf88862dbcc36eb292017dfd+45) signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), } @@ -903,7 +903,7 @@ EOS [1, 5, nil].each do |ask| test "Set replication_desired=#{ask.inspect}" do - Rails.configuration.default_collection_replication = 2 + Rails.configuration.Collections.DefaultReplication = 2 authorize_with :active put :update, params: { id: collections(:replication_undesired_unconfirmed).uuid, @@ -1176,7 +1176,7 @@ EOS assert_response 200 c = Collection.find_by_uuid(uuid) assert_operator c.trash_at, :<, db_current_time - assert_equal 
c.delete_at, c.trash_at + Rails.configuration.blob_signature_ttl + assert_equal c.delete_at, c.trash_at + Rails.configuration.Collections.BlobSigningTTL end test 'delete long-trashed collection immediately using http DELETE verb' do @@ -1208,7 +1208,7 @@ EOS assert_response 200 c = Collection.find_by_uuid(uuid) assert_operator c.trash_at, :<, db_current_time - assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.default_trash_lifetime + assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.Collections.DefaultTrashLifetime end end @@ -1373,8 +1373,8 @@ EOS end test "update collection with versioning enabled" do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 1 # 1 second + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 1 # 1 second col = collections(:collection_owned_by_active) assert_equal 2, col.version @@ -1383,7 +1383,7 @@ EOS token = api_client_authorizations(:active).v2token signed = Blob.sign_locator( 'acbd18db4cc2f85cedef654fccc4a4d8+3', - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: token) authorize_with_token token put :update, params: { diff --git a/services/api/test/functional/arvados/v1/filters_test.rb b/services/api/test/functional/arvados/v1/filters_test.rb index b596baaae4..d49fe7a3e6 100644 --- a/services/api/test/functional/arvados/v1/filters_test.rb +++ b/services/api/test/functional/arvados/v1/filters_test.rb @@ -108,7 +108,7 @@ class Arvados::V1::FiltersTest < ActionController::TestCase format: :json, count: 'none', limit: 1000, - filters: [['any', '@@', Rails.configuration.uuid_prefix]], + filters: [['any', '@@', Rails.configuration.ClusterID]], } assert_response :success @@ -137,7 +137,7 @@ class Arvados::V1::FiltersTest < ActionController::TestCase limit: 1000, offset: '5', last_object_class: 'PipelineInstance', 
- filters: [['any', '@@', Rails.configuration.uuid_prefix]], + filters: [['any', '@@', Rails.configuration.ClusterID]], } assert_response :success diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb index 37b606409e..cefb7f3830 100644 --- a/services/api/test/functional/arvados/v1/groups_controller_test.rb +++ b/services/api/test/functional/arvados/v1/groups_controller_test.rb @@ -431,7 +431,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase end test 'get contents with jobs and pipeline instances disabled' do - Rails.configuration.disable_api_methods = ['jobs.index', 'pipeline_instances.index'] + Rails.configuration.API.DisabledAPIs = ['jobs.index', 'pipeline_instances.index'] authorize_with :active get :contents, params: { @@ -444,7 +444,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase test 'get contents with low max_index_database_read' do # Some result will certainly have at least 12 bytes in a # restricted column - Rails.configuration.max_index_database_read = 12 + Rails.configuration.API.MaxIndexDatabaseRead = 12 authorize_with :active get :contents, params: { id: groups(:aproject).uuid, diff --git a/services/api/test/functional/arvados/v1/jobs_controller_test.rb b/services/api/test/functional/arvados/v1/jobs_controller_test.rb index fb81f23636..b3e10bf4a4 100644 --- a/services/api/test/functional/arvados/v1/jobs_controller_test.rb +++ b/services/api/test/functional/arvados/v1/jobs_controller_test.rb @@ -83,7 +83,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase # We need to verify that "cancel" creates a trigger file, so first # let's make sure there is no stale trigger file. 
begin - File.unlink(Rails.configuration.crunch_refresh_trigger) + File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger) rescue Errno::ENOENT end @@ -105,7 +105,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase 'server should correct bogus cancelled_at ' + job['cancelled_at']) assert_equal(true, - File.exist?(Rails.configuration.crunch_refresh_trigger), + File.exist?(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger), 'trigger file should be created when job is cancelled') end @@ -123,7 +123,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase # We need to verify that "cancel" creates a trigger file, so first # let's make sure there is no stale trigger file. begin - File.unlink(Rails.configuration.crunch_refresh_trigger) + File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger) rescue Errno::ENOENT end @@ -144,7 +144,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase # We need to verify that "cancel" creates a trigger file, so first # let's make sure there is no stale trigger file. 
begin - File.unlink(Rails.configuration.crunch_refresh_trigger) + File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger) rescue Errno::ENOENT end @@ -480,7 +480,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase end test 'jobs.create disabled in config' do - Rails.configuration.disable_api_methods = ["jobs.create", + Rails.configuration.API.DisabledAPIs = ["jobs.create", "pipeline_instances.create"] authorize_with :active post :create, params: { diff --git a/services/api/test/functional/arvados/v1/nodes_controller_test.rb b/services/api/test/functional/arvados/v1/nodes_controller_test.rb index 0beff6882a..c61a57ecc8 100644 --- a/services/api/test/functional/arvados/v1/nodes_controller_test.rb +++ b/services/api/test/functional/arvados/v1/nodes_controller_test.rb @@ -223,7 +223,7 @@ class Arvados::V1::NodesControllerTest < ActionController::TestCase end test "node should fail ping with invalid hostname config format" do - Rails.configuration.assign_node_hostname = 'compute%04' # should end with "04d" + Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%04' # should end with "04d" post :ping, params: { id: nodes(:new_with_no_hostname).uuid, ping_secret: nodes(:new_with_no_hostname).info['ping_secret'], diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb index b810d69939..537fe52527 100644 --- a/services/api/test/functional/arvados/v1/repositories_controller_test.rb +++ b/services/api/test/functional/arvados/v1/repositories_controller_test.rb @@ -200,15 +200,15 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase end [ - {cfg: :git_repo_ssh_base, cfgval: "git@example.com:", match: %r"^git@example.com:"}, - {cfg: :git_repo_ssh_base, cfgval: true, match: %r"^git@git.zzzzz.arvadosapi.com:"}, - {cfg: :git_repo_ssh_base, cfgval: false, refute: /^git@/ }, - {cfg: 
:git_repo_https_base, cfgval: "https://example.com/", match: %r"^https://example.com/"}, - {cfg: :git_repo_https_base, cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"}, - {cfg: :git_repo_https_base, cfgval: false, refute: /^http/ }, + {cfg: "GitSSH.ExternalURL", cfgval: URI("ssh://git@example.com"), match: %r"^git@example.com:"}, + {cfg: "GitSSH.ExternalURL", cfgval: URI(""), match: %r"^git@git.zzzzz.arvadosapi.com:"}, + {cfg: "GitSSH", cfgval: false, refute: /^git@/ }, + {cfg: "GitHTTP.ExternalURL", cfgval: URI("https://example.com/"), match: %r"^https://example.com/"}, + {cfg: "GitHTTP.ExternalURL", cfgval: URI(""), match: %r"^https://git.zzzzz.arvadosapi.com/"}, + {cfg: "GitHTTP", cfgval: false, refute: /^http/ }, ].each do |expect| test "set #{expect[:cfg]} to #{expect[:cfgval]}" do - Rails.configuration.send expect[:cfg].to_s+"=", expect[:cfgval] + ConfigLoader.set_cfg Rails.configuration.Services, expect[:cfg].to_s, expect[:cfgval] authorize_with :active get :index assert_response :success diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb index 53c1ed72e7..e62faa3314 100644 --- a/services/api/test/functional/arvados/v1/schema_controller_test.rb +++ b/services/api/test/functional/arvados/v1/schema_controller_test.rb @@ -29,12 +29,12 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase assert_response :success discovery_doc = JSON.parse(@response.body) assert_includes discovery_doc, 'defaultTrashLifetime' - assert_equal discovery_doc['defaultTrashLifetime'], Rails.application.config.default_trash_lifetime + assert_equal discovery_doc['defaultTrashLifetime'], Rails.configuration.Collections.DefaultTrashLifetime assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version']) assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion']) assert_match(/^unknown$/, discovery_doc['packageVersion']) - assert_equal 
discovery_doc['websocketUrl'], Rails.application.config.websocket_address - assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address + assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services.Websocket.ExternalURL.to_s + assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services.Workbench1.ExternalURL.to_s assert_equal('zzzzz', discovery_doc['uuidPrefix']) end @@ -65,7 +65,7 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase end test "non-empty disable_api_methods" do - Rails.configuration.disable_api_methods = + Rails.configuration.API.DisabledAPIs = ['jobs.create', 'pipeline_instances.create', 'pipeline_templates.create'] get :index assert_response :success diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb index 22a44a97ab..0501da1673 100644 --- a/services/api/test/functional/arvados/v1/users_controller_test.rb +++ b/services/api/test/functional/arvados/v1/users_controller_test.rb @@ -638,12 +638,12 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase setup_email = ActionMailer::Base.deliveries.last assert_not_nil setup_email, 'Expected email after setup' - assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0] + assert_equal Rails.configuration.Users.UserNotifierEmailFrom, setup_email.from[0] assert_equal 'foo@example.com', setup_email.to[0] assert_equal 'Welcome to Arvados - shell account enabled', setup_email.subject assert (setup_email.body.to_s.include? 'Your Arvados shell account has been set up'), 'Expected Your Arvados shell account has been set up in email body' - assert (setup_email.body.to_s.include? "#{Rails.configuration.workbench_address}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body' + assert (setup_email.body.to_s.include? 
"#{Rails.configuration.Services.Workbench1.ExternalURL}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body' end test "setup inactive user by changing is_active to true" do diff --git a/services/api/test/helpers/git_test_helper.rb b/services/api/test/helpers/git_test_helper.rb index 170b59ee1e..03189bdfea 100644 --- a/services/api/test/helpers/git_test_helper.rb +++ b/services/api/test/helpers/git_test_helper.rb @@ -19,14 +19,14 @@ module GitTestHelper def self.included base base.setup do # Extract the test repository data into the default test - # environment's Rails.configuration.git_repositories_dir. (We + # environment's Rails.configuration.Git.Repositories. (We # don't use that config setting here, though: it doesn't seem # worth the risk of stepping on a real git repo root.) @tmpdir = Rails.root.join 'tmp', 'git' FileUtils.mkdir_p @tmpdir system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar") - Rails.configuration.git_repositories_dir = "#{@tmpdir}/test" - Rails.configuration.git_internal_dir = "#{@tmpdir}/internal.git" + Rails.configuration.Git.Repositories = "#{@tmpdir}/test" + Rails.configuration.Containers.JobsAPI.GitInternalDir = "#{@tmpdir}/internal.git" end base.teardown do @@ -37,7 +37,7 @@ module GitTestHelper end def internal_tag tag - IO.read "|git --git-dir #{Rails.configuration.git_internal_dir.shellescape} log --format=format:%H -n1 #{tag.shellescape}" + IO.read "|git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape} log --format=format:%H -n1 #{tag.shellescape}" end # Intercept fetch_remote_repository and fetch from a specified url diff --git a/services/api/test/integration/collections_api_test.rb b/services/api/test/integration/collections_api_test.rb index 7096575342..ab1a3e69de 100644 --- a/services/api/test/integration/collections_api_test.rb +++ b/services/api/test/integration/collections_api_test.rb @@ -129,7 +129,7 @@ class CollectionsApiTest < 
ActionDispatch::IntegrationTest test "store collection as json" do signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), } signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44', @@ -146,7 +146,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest test "store collection with manifest_text only" do signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), } signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44', @@ -163,7 +163,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest test "store collection then update name" do signing_opts = { - key: Rails.configuration.blob_signing_key, + key: Rails.configuration.Collections.BlobSigningKey, api_token: api_token(:active), } signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44', diff --git a/services/api/test/integration/groups_test.rb b/services/api/test/integration/groups_test.rb index e45dd4eb52..eb97fc1f49 100644 --- a/services/api/test/integration/groups_test.rb +++ b/services/api/test/integration/groups_test.rb @@ -194,7 +194,7 @@ class NonTransactionalGroupsTest < ActionDispatch::IntegrationTest end test "create request with async=true defers permissions update" do - Rails.configuration.async_permissions_update_interval = 1 # second + Rails.configuration.API.AsyncPermissionsUpdateInterval = 1 # second name = "Random group #{rand(1000)}" assert_equal nil, Group.find_by_name(name) diff --git a/services/api/test/integration/remote_user_test.rb b/services/api/test/integration/remote_user_test.rb index 5c09cf1bcc..90a5586539 100644 --- a/services/api/test/integration/remote_user_test.rb +++ b/services/api/test/integration/remote_user_test.rb @@ -63,8 +63,8 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest ready.pop @remote_server = srv @remote_host = 
"127.0.0.1:#{srv.config[:Port]}" - Rails.configuration.remote_hosts = Rails.configuration.remote_hosts.merge({'zbbbb' => @remote_host, - 'zbork' => @remote_host}) + Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({zbbbb: ActiveSupport::InheritableOptions.new({Host: @remote_host}), + zbork: ActiveSupport::InheritableOptions.new({Host: @remote_host})}) Arvados::V1::SchemaController.any_instance.stubs(:root_url).returns "https://#{@remote_host}" @stub_status = 200 @stub_content = { @@ -243,7 +243,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest end test 'auto-activate user from trusted cluster' do - Rails.configuration.auto_activate_users_from = ['zbbbb'] + Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = true get '/arvados/v1/users/current', params: {format: 'json'}, headers: auth(remote: 'zbbbb') diff --git a/services/api/test/integration/user_sessions_test.rb b/services/api/test/integration/user_sessions_test.rb index f2dbaa5069..fdb8628c5d 100644 --- a/services/api/test/integration/user_sessions_test.rb +++ b/services/api/test/integration/user_sessions_test.rb @@ -111,10 +111,10 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest ].each do |testcase| test "user auto-activate #{testcase.inspect}" do # Configure auto_setup behavior according to testcase[:cfg] - Rails.configuration.auto_setup_new_users = testcase[:cfg][:auto] - Rails.configuration.auto_setup_new_users_with_vm_uuid = - (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : false) - Rails.configuration.auto_setup_new_users_with_repository = + Rails.configuration.Users.AutoSetupNewUsers = testcase[:cfg][:auto] + Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = + (testcase[:cfg][:vm] ? 
virtual_machines(:testvm).uuid : "") + Rails.configuration.Users.AutoSetupNewUsersWithRepository = testcase[:cfg][:repo] mock_auth_with(email: testcase[:email]) diff --git a/services/api/test/tasks/delete_old_container_logs_test.rb b/services/api/test/tasks/delete_old_container_logs_test.rb index 45278ac1aa..c81b331f24 100644 --- a/services/api/test/tasks/delete_old_container_logs_test.rb +++ b/services/api/test/tasks/delete_old_container_logs_test.rb @@ -16,7 +16,7 @@ class DeleteOldContainerLogsTaskTest < ActiveSupport::TestCase end def run_with_expiry(clean_after) - Rails.configuration.clean_container_log_rows_after = clean_after + Rails.configuration.Containers.Logging.MaxAge = clean_after Rake::Task[TASK_NAME].reenable Rake.application.invoke_task TASK_NAME end diff --git a/services/api/test/tasks/delete_old_job_logs_test.rb b/services/api/test/tasks/delete_old_job_logs_test.rb index 4d4cdbc9e5..00660431c3 100644 --- a/services/api/test/tasks/delete_old_job_logs_test.rb +++ b/services/api/test/tasks/delete_old_job_logs_test.rb @@ -16,7 +16,7 @@ class DeleteOldJobLogsTaskTest < ActiveSupport::TestCase end def run_with_expiry(clean_after) - Rails.configuration.clean_job_log_rows_after = clean_after + Rails.configuration.Containers.Logging.MaxAge = clean_after Rake::Task[TASK_NAME].reenable Rake.application.invoke_task TASK_NAME end diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb index 939242cf8e..5747a85cf5 100644 --- a/services/api/test/test_helper.rb +++ b/services/api/test/test_helper.rb @@ -99,11 +99,8 @@ class ActiveSupport::TestCase def restore_configuration # Restore configuration settings changed during tests - $application_config.each do |k,v| - if k.match(/^[^.]*$/) - Rails.configuration.send (k + '='), v - end - end + ConfigLoader.copy_into_config $arvados_config, Rails.configuration + ConfigLoader.copy_into_config $remaining_config, Rails.configuration end def set_user_from_auth(auth_name) diff --git 
a/services/api/test/unit/blob_test.rb b/services/api/test/unit/blob_test.rb index 429ebde976..293e28e4fa 100644 --- a/services/api/test/unit/blob_test.rb +++ b/services/api/test/unit/blob_test.rb @@ -130,14 +130,14 @@ class BlobTest < ActiveSupport::TestCase expire: 0x7fffffff, } - original_ttl = Rails.configuration.blob_signature_ttl - Rails.configuration.blob_signature_ttl = original_ttl*2 + original_ttl = Rails.configuration.Collections.BlobSigningTTL + Rails.configuration.Collections.BlobSigningTTL = original_ttl*2 signed2 = Blob.sign_locator @@known_locator, { api_token: @@known_token, key: @@known_key, expire: 0x7fffffff, } - Rails.configuration.blob_signature_ttl = original_ttl + Rails.configuration.Collections.BlobSigningTTL = original_ttl assert_not_equal signed, signed2 end diff --git a/services/api/test/unit/collection_test.rb b/services/api/test/unit/collection_test.rb index 8deedee018..477f9e2750 100644 --- a/services/api/test/unit/collection_test.rb +++ b/services/api/test/unit/collection_test.rb @@ -157,8 +157,8 @@ class CollectionTest < ActiveSupport::TestCase end test "auto-create version after idle setting" do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 600 # 10 minutes + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 600 # 10 minutes act_as_user users(:active) do # Set up initial collection c = create_collection 'foo', Encoding::US_ASCII @@ -188,8 +188,8 @@ class CollectionTest < ActiveSupport::TestCase end test "preserve_version=false assignment is ignored while being true and not producing a new version" do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 3600 + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 3600 act_as_user users(:active) do # Set up initial collection c = create_collection 'foo', 
Encoding::US_ASCII @@ -244,8 +244,8 @@ class CollectionTest < ActiveSupport::TestCase end test "uuid updates on current version make older versions update their pointers" do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 0 + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 0 act_as_system_user do # Set up initial collection c = create_collection 'foo', Encoding::US_ASCII @@ -267,8 +267,8 @@ class CollectionTest < ActiveSupport::TestCase end test "older versions' modified_at indicate when they're created" do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 0 + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 0 act_as_user users(:active) do # Set up initial collection c = create_collection 'foo', Encoding::US_ASCII @@ -301,8 +301,8 @@ class CollectionTest < ActiveSupport::TestCase end test "past versions should not be directly updatable" do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 0 + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 0 act_as_system_user do # Set up initial collection c = create_collection 'foo', Encoding::US_ASCII @@ -324,7 +324,7 @@ class CollectionTest < ActiveSupport::TestCase assert c_old.invalid? c_old.reload # Now disable collection versioning, it should behave the same way - Rails.configuration.collection_versioning = false + Rails.configuration.Collections.CollectionVersioning = false c_old.name = 'this was foo' assert c_old.invalid? 
end @@ -337,8 +337,8 @@ class CollectionTest < ActiveSupport::TestCase ['is_trashed', true, false], ].each do |attr, first_val, second_val| test "sync #{attr} with older versions" do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 0 + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 0 act_as_system_user do # Set up initial collection c = create_collection 'foo', Encoding::US_ASCII @@ -379,8 +379,8 @@ class CollectionTest < ActiveSupport::TestCase [false, 'replication_desired', 5, false], ].each do |versioning, attr, val, new_version_expected| test "update #{attr} with versioning #{versioning ? '' : 'not '}enabled should #{new_version_expected ? '' : 'not '}create a new version" do - Rails.configuration.collection_versioning = versioning - Rails.configuration.preserve_version_if_idle = 0 + Rails.configuration.Collections.CollectionVersioning = versioning + Rails.configuration.Collections.PreserveVersionIfIdle = 0 act_as_user users(:active) do # Create initial collection c = create_collection 'foo', Encoding::US_ASCII @@ -414,8 +414,8 @@ class CollectionTest < ActiveSupport::TestCase end test 'current_version_uuid is ignored during update' do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 0 + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 0 act_as_user users(:active) do # Create 1st collection col1 = create_collection 'foo', Encoding::US_ASCII @@ -439,8 +439,8 @@ class CollectionTest < ActiveSupport::TestCase end test 'with versioning enabled, simultaneous updates increment version correctly' do - Rails.configuration.collection_versioning = true - Rails.configuration.preserve_version_if_idle = 0 + Rails.configuration.Collections.CollectionVersioning = true + Rails.configuration.Collections.PreserveVersionIfIdle = 0 act_as_user 
users(:active) do # Create initial collection col = create_collection 'foo', Encoding::US_ASCII @@ -654,7 +654,7 @@ class CollectionTest < ActiveSupport::TestCase [0, 2, 4, nil].each do |ask| test "set replication_desired to #{ask.inspect}" do - Rails.configuration.default_collection_replication = 2 + Rails.configuration.Collections.DefaultReplication = 2 act_as_user users(:active) do c = collections(:replication_undesired_unconfirmed) c.update_attributes replication_desired: ask @@ -760,7 +760,7 @@ class CollectionTest < ActiveSupport::TestCase name: 'foo', trash_at: db_current_time + 1.years) sig_exp = /\+A[0-9a-f]{40}\@([0-9]+)/.match(c.signed_manifest_text)[1].to_i - expect_max_sig_exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl + expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL assert_operator c.trash_at.to_i, :>, expect_max_sig_exp assert_operator sig_exp.to_i, :<=, expect_max_sig_exp end @@ -849,7 +849,7 @@ class CollectionTest < ActiveSupport::TestCase test test_name do act_as_user users(:active) do min_exp = (db_current_time + - Rails.configuration.blob_signature_ttl.seconds) + Rails.configuration.Collections.BlobSigningTTL.seconds) if fixture_name == :expired_collection # Fixture-finder shorthand doesn't find trashed collections # because they're not in the default scope. 
@@ -890,7 +890,7 @@ class CollectionTest < ActiveSupport::TestCase end test 'default trash interval > blob signature ttl' do - Rails.configuration.default_trash_lifetime = 86400 * 21 # 3 weeks + Rails.configuration.Collections.DefaultTrashLifetime = 86400 * 21 # 3 weeks start = db_current_time act_as_user users(:active) do c = Collection.create!(manifest_text: '', name: 'foo') diff --git a/services/api/test/unit/commit_test.rb b/services/api/test/unit/commit_test.rb index af365b19e2..c5d72c3bfe 100644 --- a/services/api/test/unit/commit_test.rb +++ b/services/api/test/unit/commit_test.rb @@ -78,7 +78,7 @@ class CommitTest < ActiveSupport::TestCase test 'tag_in_internal_repository creates and updates tags in internal.git' do authorize_with :active - gitint = "git --git-dir #{Rails.configuration.git_internal_dir}" + gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir}" IO.read("|#{gitint} tag -d testtag 2>/dev/null") # "no such tag", fine assert_match(/^fatal: /, IO.read("|#{gitint} show testtag 2>&1")) refute $?.success? @@ -88,7 +88,7 @@ class CommitTest < ActiveSupport::TestCase end def with_foo_repository - Dir.chdir("#{Rails.configuration.git_repositories_dir}/#{repositories(:foo).uuid}") do + Dir.chdir("#{Rails.configuration.Git.Repositories}/#{repositories(:foo).uuid}") do must_pipe("git checkout master 2>&1") yield end @@ -107,7 +107,7 @@ class CommitTest < ActiveSupport::TestCase must_pipe("git -c user.email=x@x -c user.name=X commit -m -") end Commit.tag_in_internal_repository 'active/foo', sha1, tag - gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}" + gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}" assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}")) assert $?.success? 
end @@ -123,7 +123,7 @@ class CommitTest < ActiveSupport::TestCase must_pipe("git reset --hard HEAD^") end Commit.tag_in_internal_repository 'active/foo', sha1, tag - gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}" + gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}" assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}")) assert $?.success? end diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb index 5c4a56c2c5..5174e6cf42 100644 --- a/services/api/test/unit/container_request_test.rb +++ b/services/api/test/unit/container_request_test.rb @@ -514,7 +514,7 @@ class ContainerRequestTest < ActiveSupport::TestCase test "Container.resolve_container_image(pdh)" do set_user_from_auth :active [[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver| - Rails.configuration.docker_image_formats = [ver] + Rails.configuration.Containers.SupportedDockerImageFormats = [ver] pdh = collections(coll).portable_data_hash resolved = Container.resolve_container_image(pdh) assert_equal resolved, pdh @@ -535,12 +535,12 @@ class ContainerRequestTest < ActiveSupport::TestCase test "allow unrecognized container when there are remote_hosts" do set_user_from_auth :active - Rails.configuration.remote_hosts = {"foooo" => "bar.com"} + Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({foooo: ActiveSupport::InheritableOptions.new({Host: "bar.com"})}) Container.resolve_container_image('acbd18db4cc2f85cedef654fccc4a4d8+3') end test "migrated docker image" do - Rails.configuration.docker_image_formats = ['v2'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v2'] add_docker19_migration_link # Test that it returns only v2 images even though request is for v1 image. 
@@ -558,7 +558,7 @@ class ContainerRequestTest < ActiveSupport::TestCase end test "use unmigrated docker image" do - Rails.configuration.docker_image_formats = ['v1'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v1'] add_docker19_migration_link # Test that it returns only supported v1 images even though there is a @@ -577,7 +577,7 @@ class ContainerRequestTest < ActiveSupport::TestCase end test "incompatible docker image v1" do - Rails.configuration.docker_image_formats = ['v1'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v1'] add_docker19_migration_link # Don't return unsupported v2 image even if we ask for it directly. @@ -590,7 +590,7 @@ class ContainerRequestTest < ActiveSupport::TestCase end test "incompatible docker image v2" do - Rails.configuration.docker_image_formats = ['v2'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v2'] # No migration link, don't return unsupported v1 image, set_user_from_auth :active @@ -836,7 +836,7 @@ class ContainerRequestTest < ActiveSupport::TestCase assert_not_nil(trash) assert_not_nil(delete) assert_in_delta(trash, now + 1.second, 10) - assert_in_delta(delete, now + Rails.configuration.blob_signature_ttl.second, 10) + assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL.second, 10) end def check_output_ttl_1y(now, trash, delete) @@ -884,7 +884,7 @@ class ContainerRequestTest < ActiveSupport::TestCase [false, ActiveRecord::RecordInvalid], [true, nil], ].each do |preemptible_conf, expected| - test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do + test "having Rails.configuration.Containers.UsePreemptibleInstances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do sp = {"preemptible" => true} common_attrs = {cwd: "test", priority: 1, @@ -892,7 +892,7 @@ class ContainerRequestTest < ActiveSupport::TestCase output_path: "test", 
scheduling_parameters: sp, mounts: {"test" => {"kind" => "json"}}} - Rails.configuration.preemptible_instances = preemptible_conf + Rails.configuration.Containers.UsePreemptibleInstances = preemptible_conf set_user_from_auth :active cr = create_minimal_req!(common_attrs) @@ -921,7 +921,7 @@ class ContainerRequestTest < ActiveSupport::TestCase scheduling_parameters: {"preemptible" => false}, mounts: {"test" => {"kind" => "json"}}} - Rails.configuration.preemptible_instances = true + Rails.configuration.Containers.UsePreemptibleInstances = true set_user_from_auth :active if requesting_c @@ -946,14 +946,14 @@ class ContainerRequestTest < ActiveSupport::TestCase [false, 'zzzzz-dz642-runningcontainr', nil], [false, nil, nil], ].each do |preemptible_conf, requesting_c, schedule_preemptible| - test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do + test "having Rails.configuration.Containers.UsePreemptibleInstances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? 
'':'not'} ask for preemptible instance by default" do common_attrs = {cwd: "test", priority: 1, command: ["echo", "hello"], output_path: "test", mounts: {"test" => {"kind" => "json"}}} - Rails.configuration.preemptible_instances = preemptible_conf + Rails.configuration.Containers.UsePreemptibleInstances = preemptible_conf set_user_from_auth :active if requesting_c @@ -1017,7 +1017,7 @@ class ContainerRequestTest < ActiveSupport::TestCase state: ContainerRequest::Committed, mounts: {"test" => {"kind" => "json"}}} set_user_from_auth :active - Rails.configuration.preemptible_instances = true + Rails.configuration.Containers.UsePreemptibleInstances = true cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do create_minimal_req!(common_attrs) diff --git a/services/api/test/unit/container_test.rb b/services/api/test/unit/container_test.rb index 5ce3739a36..88fd5feb6a 100644 --- a/services/api/test/unit/container_test.rb +++ b/services/api/test/unit/container_test.rb @@ -184,7 +184,7 @@ class ContainerTest < ActiveSupport::TestCase assert_equal c1.runtime_status, {} assert_equal Container::Queued, c1.state - assert_raises ActiveRecord::RecordInvalid do + assert_raises ArvadosModel::PermissionDeniedError do c1.update_attributes! 
runtime_status: {'error' => 'Oops!'} end @@ -241,7 +241,7 @@ class ContainerTest < ActiveSupport::TestCase end test "find_reusable method should select higher priority queued container" do - Rails.configuration.log_reuse_decisions = true + Rails.configuration.Containers.LogReuseDecisions = true set_user_from_auth :active common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{"var" => "queued"}}) c_low_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:1})) @@ -511,7 +511,7 @@ class ContainerTest < ActiveSupport::TestCase test "find_reusable with logging enabled" do set_user_from_auth :active - Rails.configuration.log_reuse_decisions = true + Rails.configuration.Containers.LogReuseDecisions = true Rails.logger.expects(:info).at_least(3) Container.find_reusable(REUSABLE_COMMON_ATTRS) end @@ -666,7 +666,7 @@ class ContainerTest < ActiveSupport::TestCase end test "Exceed maximum lock-unlock cycles" do - Rails.configuration.max_container_dispatch_attempts = 3 + Rails.configuration.Containers.MaxDispatchAttempts = 3 set_user_from_auth :active c, cr = minimal_new @@ -777,6 +777,51 @@ class ContainerTest < ActiveSupport::TestCase end end + [ + [Container::Queued, {state: Container::Locked}], + [Container::Queued, {state: Container::Running}], + [Container::Queued, {state: Container::Complete}], + [Container::Queued, {state: Container::Cancelled}], + [Container::Queued, {priority: 123456789}], + [Container::Queued, {runtime_status: {'error' => 'oops'}}], + [Container::Queued, {cwd: '/'}], + [Container::Locked, {state: Container::Running}], + [Container::Locked, {state: Container::Queued}], + [Container::Locked, {priority: 123456789}], + [Container::Locked, {runtime_status: {'error' => 'oops'}}], + [Container::Locked, {cwd: '/'}], + [Container::Running, {state: Container::Complete}], + [Container::Running, {state: Container::Cancelled}], + [Container::Running, {priority: 123456789}], + [Container::Running, {runtime_status: {'error' => 'oops'}}], + 
[Container::Running, {cwd: '/'}], + [Container::Complete, {state: Container::Cancelled}], + [Container::Complete, {priority: 123456789}], + [Container::Complete, {runtime_status: {'error' => 'oops'}}], + [Container::Complete, {cwd: '/'}], + [Container::Cancelled, {cwd: '/'}], + ].each do |start_state, updates| + test "Container update #{updates.inspect} when #{start_state} forbidden for non-admin" do + set_user_from_auth :active + c, _ = minimal_new + if start_state != Container::Queued + set_user_from_auth :dispatch1 + c.lock + if start_state != Container::Locked + c.update_attributes! state: Container::Running + if start_state != Container::Running + c.update_attributes! state: start_state + end + end + end + assert_equal c.state, start_state + set_user_from_auth :active + assert_raises(ArvadosModel::PermissionDeniedError) do + c.update_attributes! updates + end + end + end + test "Container only set exit code on complete" do set_user_from_auth :active c, _ = minimal_new @@ -899,7 +944,9 @@ class ContainerTest < ActiveSupport::TestCase c.update_attributes! 
state: Container::Running set_user_from_auth :running_to_be_deleted_container_auth - refute c.update_attributes(output: collections(:foo_file).portable_data_hash) + assert_raises(ArvadosModel::PermissionDeniedError) do + c.update_attributes(output: collections(:foo_file).portable_data_hash) + end end test "can set trashed output on running container" do diff --git a/services/api/test/unit/crunch_dispatch_test.rb b/services/api/test/unit/crunch_dispatch_test.rb index 42ef0d160c..3a8f90a66b 100644 --- a/services/api/test/unit/crunch_dispatch_test.rb +++ b/services/api/test/unit/crunch_dispatch_test.rb @@ -99,7 +99,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase test 'override --cgroup-root with CRUNCH_CGROUP_ROOT' do ENV['CRUNCH_CGROUP_ROOT'] = '/path/to/cgroup' - Rails.configuration.crunch_job_wrapper = :none + Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "none" act_as_system_user do j = Job.create(repository: 'active/foo', script: 'hash', @@ -140,7 +140,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase test 'rate limit of partial line segments' do act_as_system_user do - Rails.configuration.crunch_log_partial_line_throttle_period = 1 + Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod = 1 job = {} job[:bytes_logged] = 0 @@ -197,7 +197,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase end test 'scancel orphaned job nodes' do - Rails.configuration.crunch_job_wrapper = :slurm_immediate + Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "slurm_immediate" act_as_system_user do dispatch = CrunchDispatch.new diff --git a/services/api/test/unit/fail_jobs_test.rb b/services/api/test/unit/fail_jobs_test.rb index 3c7f9a9096..304335c6f0 100644 --- a/services/api/test/unit/fail_jobs_test.rb +++ b/services/api/test/unit/fail_jobs_test.rb @@ -40,8 +40,8 @@ class FailJobsTest < ActiveSupport::TestCase end test 'cancel slurm jobs' do - Rails.configuration.crunch_job_wrapper = :slurm_immediate - 
Rails.configuration.crunch_job_user = 'foobar' + Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "slurm_immediate" + Rails.configuration.Containers.JobsAPI.CrunchJobUser = 'foobar' fake_squeue = IO.popen("echo #{@job[:before_reboot].uuid}") fake_scancel = IO.popen("true") IO.expects(:popen). @@ -55,7 +55,7 @@ class FailJobsTest < ActiveSupport::TestCase end test 'use reboot time' do - Rails.configuration.crunch_job_wrapper = nil + Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = nil @dispatch.expects(:open).once.with('/proc/stat'). returns open(Rails.root.join('test/fixtures/files/proc_stat')) @dispatch.fail_jobs(before: 'reboot') diff --git a/services/api/test/unit/job_test.rb b/services/api/test/unit/job_test.rb index fcbd1722f3..f47a1c10f9 100644 --- a/services/api/test/unit/job_test.rb +++ b/services/api/test/unit/job_test.rb @@ -90,7 +90,7 @@ class JobTest < ActiveSupport::TestCase ].each do |use_config| test "Job with no Docker image uses default docker image when configuration is set #{use_config}" do default_docker_image = collections(:docker_image)[:portable_data_hash] - Rails.configuration.default_docker_image_for_jobs = default_docker_image if use_config + Rails.configuration.Containers.JobsAPI.DefaultDockerImage = default_docker_image if use_config job = Job.new job_attrs assert job.valid?, job.errors.full_messages.to_s @@ -127,10 +127,10 @@ class JobTest < ActiveSupport::TestCase 'locator' => BAD_COLLECTION, }.each_pair do |spec_type, image_spec| test "Job validation fails with nonexistent Docker image #{spec_type}" do - Rails.configuration.remote_hosts = {} + Rails.configuration.RemoteClusters = {} job = Job.new job_attrs(runtime_constraints: {'docker_image' => image_spec}) - assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid") + assert(job.invalid?, "nonexistent Docker image #{spec_type} #{image_spec} was valid") end end @@ -426,7 +426,7 @@ class JobTest < ActiveSupport::TestCase end test "use migrated docker 
image if requesting old-format image by tag" do - Rails.configuration.docker_image_formats = ['v2'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v2'] add_docker19_migration_link job = Job.create!( job_attrs( @@ -438,7 +438,7 @@ class JobTest < ActiveSupport::TestCase end test "use migrated docker image if requesting old-format image by pdh" do - Rails.configuration.docker_image_formats = ['v2'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v2'] add_docker19_migration_link job = Job.create!( job_attrs( @@ -455,7 +455,7 @@ class JobTest < ActiveSupport::TestCase [:docker_image_1_12, :docker_image_1_12, :docker_image_1_12], ].each do |existing_image, request_image, expect_image| test "if a #{existing_image} job exists, #{request_image} yields #{expect_image} after migration" do - Rails.configuration.docker_image_formats = ['v1'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v1'] if existing_image == :docker_image oldjob = Job.create!( @@ -477,7 +477,7 @@ class JobTest < ActiveSupport::TestCase end end - Rails.configuration.docker_image_formats = ['v2'] + Rails.configuration.Containers.SupportedDockerImageFormats = ['v2'] add_docker19_migration_link # Check that both v1 and v2 images get resolved to v2. @@ -568,7 +568,7 @@ class JobTest < ActiveSupport::TestCase end test 'find_reusable with logging' do - Rails.configuration.log_reuse_decisions = true + Rails.configuration.Containers.LogReuseDecisions = true Rails.logger.expects(:info).at_least(3) try_find_reusable end @@ -595,7 +595,7 @@ class JobTest < ActiveSupport::TestCase assert_nil Job.find_reusable(example_attrs, {}, [], [users(:active)]) # ...unless config says to reuse the earlier job in such cases. 
- Rails.configuration.reuse_job_if_outputs_differ = true + Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer = true j = Job.find_reusable(example_attrs, {}, [], [users(:active)]) assert_equal foobar.uuid, j.uuid end @@ -648,33 +648,32 @@ class JobTest < ActiveSupport::TestCase end test 'enable legacy api configuration option = true' do - Rails.configuration.enable_legacy_jobs_api = true + Rails.configuration.Containers.JobsAPI.Enable = "true" check_enable_legacy_jobs_api - assert_equal [], Rails.configuration.disable_api_methods + assert_equal [], Rails.configuration.API.DisabledAPIs end test 'enable legacy api configuration option = false' do - Rails.configuration.enable_legacy_jobs_api = false + Rails.configuration.Containers.JobsAPI.Enable = "false" check_enable_legacy_jobs_api - assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods + assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs end test 'enable legacy api configuration option = auto, has jobs' do - Rails.configuration.enable_legacy_jobs_api = "auto" + Rails.configuration.Containers.JobsAPI.Enable = "auto" assert Job.count > 0 - assert_equal [], Rails.configuration.disable_api_methods check_enable_legacy_jobs_api - assert_equal [], Rails.configuration.disable_api_methods + assert_equal [], Rails.configuration.API.DisabledAPIs end test 'enable legacy api configuration option = auto, no jobs' do - Rails.configuration.enable_legacy_jobs_api = "auto" + Rails.configuration.Containers.JobsAPI.Enable = "auto" act_as_system_user do Job.destroy_all end assert_equal 0, Job.count - assert_equal [], Rails.configuration.disable_api_methods + assert_equal [], Rails.configuration.API.DisabledAPIs check_enable_legacy_jobs_api - assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods + assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs end end diff --git a/services/api/test/unit/log_test.rb 
b/services/api/test/unit/log_test.rb index 5a78f25235..8a878ada91 100644 --- a/services/api/test/unit/log_test.rb +++ b/services/api/test/unit/log_test.rb @@ -282,7 +282,7 @@ class LogTest < ActiveSupport::TestCase end test "non-empty configuration.unlogged_attributes" do - Rails.configuration.unlogged_attributes = ["manifest_text"] + Rails.configuration.AuditLogs.UnloggedAttributes = ["manifest_text"] txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n" act_as_system_user do @@ -297,7 +297,7 @@ class LogTest < ActiveSupport::TestCase end test "empty configuration.unlogged_attributes" do - Rails.configuration.unlogged_attributes = [] + Rails.configuration.AuditLogs.UnloggedAttributes = [] txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n" act_as_system_user do @@ -332,8 +332,8 @@ class LogTest < ActiveSupport::TestCase test 'retain old audit logs with default settings' do assert_no_logs_deleted do AuditLogs.delete_old( - max_age: Rails.configuration.max_audit_log_age, - max_batch: Rails.configuration.max_audit_log_delete_batch) + max_age: Rails.configuration.AuditLogs.MaxAge, + max_batch: Rails.configuration.AuditLogs.MaxDeleteBatch) end end @@ -362,8 +362,8 @@ class LogTest < ActiveSupport::TestCase test 'delete old audit logs in thread' do begin - Rails.configuration.max_audit_log_age = 20 - Rails.configuration.max_audit_log_delete_batch = 100000 + Rails.configuration.AuditLogs.MaxAge = 20 + Rails.configuration.AuditLogs.MaxDeleteBatch = 100000 Rails.cache.delete 'AuditLogs' initial_log_count = Log.unscoped.all.count + 1 act_as_system_user do diff --git a/services/api/test/unit/node_test.rb b/services/api/test/unit/node_test.rb index 4cb7a0a1b1..b54e8d9de6 100644 --- a/services/api/test/unit/node_test.rb +++ b/services/api/test/unit/node_test.rb @@ -34,8 +34,8 @@ class NodeTest < ActiveSupport::TestCase end test "dns_server_conf_template" do - Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp' - Rails.configuration.dns_server_conf_template = 
Rails.root.join 'config', 'unbound.template' + Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp' + Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template' conffile = Rails.root.join 'tmp', 'compute65535.conf' File.unlink conffile rescue nil assert Node.dns_server_update 'compute65535', '127.0.0.1' @@ -44,8 +44,8 @@ class NodeTest < ActiveSupport::TestCase end test "dns_server_restart_command" do - Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp' - Rails.configuration.dns_server_reload_command = 'foobar' + Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp' + Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar' restartfile = Rails.root.join 'tmp', 'restart.txt' File.unlink restartfile rescue nil assert Node.dns_server_update 'compute65535', '127.0.0.127' @@ -54,14 +54,14 @@ class NodeTest < ActiveSupport::TestCase end test "dns_server_restart_command fail" do - Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp', 'bogusdir' - Rails.configuration.dns_server_reload_command = 'foobar' + Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp', 'bogusdir' + Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar' refute Node.dns_server_update 'compute65535', '127.0.0.127' end test "dns_server_update_command with valid command" do testfile = Rails.root.join('tmp', 'node_test_dns_server_update_command.txt') - Rails.configuration.dns_server_update_command = + Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = ('echo -n "%{hostname} == %{ip_address}" >' + testfile.to_s.shellescape) assert Node.dns_server_update 'compute65535', '127.0.0.1' @@ -70,23 +70,23 @@ class NodeTest < ActiveSupport::TestCase end test "dns_server_update_command with failing command" do - Rails.configuration.dns_server_update_command = 'false %{hostname}' 
+ Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = 'false %{hostname}' refute Node.dns_server_update 'compute65535', '127.0.0.1' end test "dns update with no commands/dirs configured" do - Rails.configuration.dns_server_update_command = false - Rails.configuration.dns_server_conf_dir = false - Rails.configuration.dns_server_conf_template = 'ignored!' - Rails.configuration.dns_server_reload_command = 'ignored!' + Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = "" + Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = "" + Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = 'ignored!' + Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'ignored!' assert Node.dns_server_update 'compute65535', '127.0.0.127' end test "don't leave temp files behind if there's an error writing them" do - Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template' + Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template' Tempfile.any_instance.stubs(:puts).raises(IOError) Dir.mktmpdir do |tmpdir| - Rails.configuration.dns_server_conf_dir = tmpdir + Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = tmpdir refute Node.dns_server_update 'compute65535', '127.0.0.127' assert_empty Dir.entries(tmpdir).select{|f| File.file? 
f} end @@ -100,14 +100,14 @@ class NodeTest < ActiveSupport::TestCase end test "ping new node with no hostname and no config" do - Rails.configuration.assign_node_hostname = false + Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false node = ping_node(:new_with_no_hostname, {}) refute_nil node.slot_number assert_nil node.hostname end test "ping new node with zero padding config" do - Rails.configuration.assign_node_hostname = 'compute%04d' + Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%04d' node = ping_node(:new_with_no_hostname, {}) slot_number = node.slot_number refute_nil slot_number @@ -121,7 +121,7 @@ class NodeTest < ActiveSupport::TestCase end test "ping node with hostname and no config and expect hostname unchanged" do - Rails.configuration.assign_node_hostname = false + Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false node = ping_node(:new_with_custom_hostname, {}) assert_equal(23, node.slot_number) assert_equal("custom1", node.hostname) @@ -196,13 +196,13 @@ class NodeTest < ActiveSupport::TestCase end test 'run out of slots' do - Rails.configuration.max_compute_nodes = 3 + Rails.configuration.Containers.MaxComputeVMs = 3 act_as_system_user do Node.destroy_all (1..4).each do |i| n = Node.create! 
args = { ip: "10.0.0.#{i}", ping_secret: n.info['ping_secret'] } - if i <= Rails.configuration.max_compute_nodes + if i <= Rails.configuration.Containers.MaxComputeVMs n.ping(args) else assert_raises do diff --git a/services/api/test/unit/repository_test.rb b/services/api/test/unit/repository_test.rb index fa4c37f74f..cb562ef977 100644 --- a/services/api/test/unit/repository_test.rb +++ b/services/api/test/unit/repository_test.rb @@ -23,15 +23,15 @@ class RepositoryTest < ActiveSupport::TestCase def default_git_url(repo_name, user_name=nil) if user_name "git@git.%s.arvadosapi.com:%s/%s.git" % - [Rails.configuration.uuid_prefix, user_name, repo_name] + [Rails.configuration.ClusterID, user_name, repo_name] else "git@git.%s.arvadosapi.com:%s.git" % - [Rails.configuration.uuid_prefix, repo_name] + [Rails.configuration.ClusterID, repo_name] end end def assert_server_path(path_tail, repo_sym) - assert_equal(File.join(Rails.configuration.git_repositories_dir, path_tail), + assert_equal(File.join(Rails.configuration.Git.Repositories, path_tail), repositories(repo_sym).server_path) end diff --git a/services/api/test/unit/user_notifier_test.rb b/services/api/test/unit/user_notifier_test.rb index 008259c0b6..f409d231f1 100644 --- a/services/api/test/unit/user_notifier_test.rb +++ b/services/api/test/unit/user_notifier_test.rb @@ -14,12 +14,12 @@ class UserNotifierTest < ActionMailer::TestCase assert_not_nil email # Test the body of the sent email contains what we expect it to - assert_equal Rails.configuration.user_notifier_email_from, email.from.first + assert_equal Rails.configuration.Users.UserNotifierEmailFrom, email.from.first assert_equal user.email, email.to.first assert_equal 'Welcome to Arvados - shell account enabled', email.subject assert (email.body.to_s.include? 'Your Arvados shell account has been set up'), 'Expected Your Arvados shell account has been set up in email body' - assert (email.body.to_s.include? 
Rails.configuration.workbench_address), + assert (email.body.to_s.include? Rails.configuration.Services.Workbench1.ExternalURL.to_s), 'Expected workbench url in email body' end diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb index 67c410047c..185653e873 100644 --- a/services/api/test/unit/user_test.rb +++ b/services/api/test/unit/user_test.rb @@ -110,7 +110,7 @@ class UserTest < ActiveSupport::TestCase end test "new username set avoiding blacklist" do - Rails.configuration.auto_setup_name_blacklist = ["root"] + Rails.configuration.Users.AutoSetupUsernameBlacklist = ["root"] check_new_username_setting("root", "root2") end @@ -157,8 +157,8 @@ class UserTest < ActiveSupport::TestCase [false, 'bar@example.com', nil, true], [true, 'foo@example.com', true, nil], [true, 'bar@example.com', true, true], - [false, false, nil, nil], - [true, false, true, nil] + [false, '', nil, nil], + [true, '', true, nil] ].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin| # In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'. 
test "auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}" do @@ -170,8 +170,8 @@ class UserTest < ActiveSupport::TestCase assert_equal 0, @all_users.count, "No admin users should exist (except for the system user)" end - Rails.configuration.auto_admin_first_user = auto_admin_first_user_config - Rails.configuration.auto_admin_user = auto_admin_user_config + Rails.configuration.Users.AutoAdminFirstUser = auto_admin_first_user_config + Rails.configuration.Users.AutoAdminUserWithEmail = auto_admin_user_config # See if the foo user has is_admin foo = User.new @@ -384,15 +384,15 @@ class UserTest < ActiveSupport::TestCase test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do set_user_from_auth :admin - Rails.configuration.auto_setup_new_users = true + Rails.configuration.Users.AutoSetupNewUsers = true if auto_setup_vm - Rails.configuration.auto_setup_new_users_with_vm_uuid = virtual_machines(:testvm)['uuid'] + Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = virtual_machines(:testvm)['uuid'] else - Rails.configuration.auto_setup_new_users_with_vm_uuid = false + Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = "" end - Rails.configuration.auto_setup_new_users_with_repository = auto_setup_repo + Rails.configuration.Users.AutoSetupNewUsersWithRepository = auto_setup_repo create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, expect_username end @@ -625,12 +625,12 @@ class UserTest < ActiveSupport::TestCase end def create_user_and_verify_setup_and_notifications (active, new_user_recipients, inactive_recipients, email, expect_username) - Rails.configuration.new_user_notification_recipients = new_user_recipients - Rails.configuration.new_inactive_user_notification_recipients = inactive_recipients + Rails.configuration.Users.NewUserNotificationRecipients = new_user_recipients + 
Rails.configuration.Users.NewInactiveUserNotificationRecipients = inactive_recipients ActionMailer::Base.deliveries = [] - can_setup = (Rails.configuration.auto_setup_new_users and + can_setup = (Rails.configuration.Users.AutoSetupNewUsers and (not expect_username.nil?)) expect_repo_name = "#{expect_username}/#{expect_username}" prior_repo = Repository.where(name: expect_repo_name).first @@ -643,21 +643,21 @@ class UserTest < ActiveSupport::TestCase assert_equal(expect_username, user.username) # check user setup - verify_link_exists(Rails.configuration.auto_setup_new_users || active, + verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active, groups(:all_users).uuid, user.uuid, "permission", "can_read") # Check for OID login link. - verify_link_exists(Rails.configuration.auto_setup_new_users || active, + verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active, user.uuid, user.email, "permission", "can_login") # Check for repository. if named_repo = (prior_repo or Repository.where(name: expect_repo_name).first) verify_link_exists((can_setup and prior_repo.nil? and - Rails.configuration.auto_setup_new_users_with_repository), + Rails.configuration.Users.AutoSetupNewUsersWithRepository), named_repo.uuid, user.uuid, "permission", "can_manage") end # Check for VM login. 
- if auto_vm_uuid = Rails.configuration.auto_setup_new_users_with_vm_uuid + if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != "" verify_link_exists(can_setup, auto_vm_uuid, user.uuid, "permission", "can_login", "username", expect_username) end @@ -666,17 +666,17 @@ class UserTest < ActiveSupport::TestCase new_user_email = nil new_inactive_user_email = nil - new_user_email_subject = "#{Rails.configuration.email_subject_prefix}New user created notification" - if Rails.configuration.auto_setup_new_users + new_user_email_subject = "#{Rails.configuration.Users.EmailSubjectPrefix}New user created notification" + if Rails.configuration.Users.AutoSetupNewUsers new_user_email_subject = (expect_username or active) ? - "#{Rails.configuration.email_subject_prefix}New user created and setup notification" : - "#{Rails.configuration.email_subject_prefix}New user created, but not setup notification" + "#{Rails.configuration.Users.EmailSubjectPrefix}New user created and setup notification" : + "#{Rails.configuration.Users.EmailSubjectPrefix}New user created, but not setup notification" end ActionMailer::Base.deliveries.each do |d| if d.subject == new_user_email_subject then new_user_email = d - elsif d.subject == "#{Rails.configuration.email_subject_prefix}New inactive user notification" then + elsif d.subject == "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification" then new_inactive_user_email = d end end @@ -685,7 +685,7 @@ class UserTest < ActiveSupport::TestCase # if the new user email recipients config parameter is set if not new_user_recipients.empty? 
then assert_not_nil new_user_email, 'Expected new user email after setup' - assert_equal Rails.configuration.user_notifier_email_from, new_user_email.from[0] + assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_user_email.from[0] assert_equal new_user_recipients, new_user_email.to[0] assert_equal new_user_email_subject, new_user_email.subject else @@ -695,9 +695,9 @@ class UserTest < ActiveSupport::TestCase if not active if not inactive_recipients.empty? then assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup' - assert_equal Rails.configuration.user_notifier_email_from, new_inactive_user_email.from[0] + assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_inactive_user_email.from[0] assert_equal inactive_recipients, new_inactive_user_email.to[0] - assert_equal "#{Rails.configuration.email_subject_prefix}New inactive user notification", new_inactive_user_email.subject + assert_equal "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification", new_inactive_user_email.subject else assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup' end diff --git a/tools/arvbox/bin/arvbox b/tools/arvbox/bin/arvbox index 74933718c7..878119634b 100755 --- a/tools/arvbox/bin/arvbox +++ b/tools/arvbox/bin/arvbox @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: AGPL-3.0 @@ -64,7 +64,7 @@ GOSTUFF="$ARVBOX_DATA/gopath" RLIBS="$ARVBOX_DATA/Rlibs" getip() { - docker inspect $ARVBOX_CONTAINER | grep \"IPAddress\" | head -n1 | tr -d ' ":,\n' | cut -c10- + docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $ARVBOX_CONTAINER } gethost() { @@ -103,8 +103,11 @@ wait_for_arvbox() { docker logs -f $ARVBOX_CONTAINER > $FF & LOGPID=$! 
while read line ; do - if echo $line | grep "ok: down: ready:" >/dev/null ; then + if [[ $line =~ "ok: down: ready:" ]] ; then kill $LOGPID + set +e + wait $LOGPID 2>/dev/null + set -e else echo $line fi @@ -132,9 +135,14 @@ docker_run_dev() { "--volume=$NPMCACHE:/var/lib/npm:rw" \ "--volume=$GOSTUFF:/var/lib/gopath:rw" \ "--volume=$RLIBS:/var/lib/Rlibs:rw" \ + --label "org.arvados.arvbox_config=$CONFIG" \ "$@" } +running_config() { + docker inspect $ARVBOX_CONTAINER -f '{{index .Config.Labels "org.arvados.arvbox_config"}}' +} + run() { CONFIG=$1 TAG=$2 @@ -144,18 +152,22 @@ run() { need_setup=1 if docker ps -a --filter "status=running" | grep -E "$ARVBOX_CONTAINER$" -q ; then - if test "$CONFIG" = test ; then + if [[ $(running_config) != "$CONFIG" ]] ; then + echo "Container $ARVBOX_CONTAINER is '$(running_config)' config but requested '$CONFIG'; use restart or reboot" + return 1 + fi + if test "$CONFIG" = test -o "$CONFIG" = devenv ; then need_setup=0 else echo "Container $ARVBOX_CONTAINER is already running" - exit 0 + return 0 fi fi if test $need_setup = 1 ; then if docker ps -a | grep -E "$ARVBOX_CONTAINER$" -q ; then echo "Container $ARVBOX_CONTAINER already exists but is not running; use restart or reboot" - exit 1 + return 1 fi fi @@ -165,11 +177,14 @@ run() { TAG=":$TAG" shift else + if [[ $TAG = '-' ]] ; then + shift + fi unset TAG fi fi - if echo "$CONFIG" | grep '^public' ; then + if [[ "$CONFIG" =~ ^public ]] ; then if test -n "$ARVBOX_PUBLISH_IP" ; then localip=$ARVBOX_PUBLISH_IP else @@ -195,10 +210,10 @@ run() { PUBLIC="" fi - if echo "$CONFIG" | grep 'demo$' ; then + if [[ "$CONFIG" =~ demo$ ]] ; then if test -d "$ARVBOX_DATA" ; then echo "It looks like you already have a development container named $ARVBOX_CONTAINER." 
- echo "Set ARVBOX_CONTAINER to set a different name for your demo container" + echo "Set environment variable ARVBOX_CONTAINER to set a different name for your demo container" exit 1 fi @@ -211,6 +226,7 @@ run() { --name=$ARVBOX_CONTAINER \ --privileged \ --volumes-from $ARVBOX_CONTAINER-data \ + --label "org.arvados.arvbox_config=$CONFIG" \ $PUBLIC \ arvados/arvbox-demo$TAG updateconf @@ -218,7 +234,6 @@ run() { else mkdir -p "$PG_DATA" "$VAR_DATA" "$PASSENGER" "$GEMS" "$PIPCACHE" "$NPMCACHE" "$GOSTUFF" "$RLIBS" - if ! test -d "$ARVADOS_ROOT" ; then git clone https://github.com/curoverse/arvados.git "$ARVADOS_ROOT" fi @@ -232,7 +247,7 @@ run() { git clone https://github.com/curoverse/arvados-workbench2.git "$WORKBENCH2_ROOT" fi - if test "$CONFIG" = test ; then + if [[ "$CONFIG" = test ]] ; then mkdir -p $VAR_DATA/test @@ -261,14 +276,36 @@ run() { fi docker exec -ti \ + -e LINES=$(tput lines) \ + -e COLUMNS=$(tput cols) \ + -e TERM=$TERM \ + -e WORKSPACE=/usr/src/arvados \ + -e GEM_HOME=/var/lib/gems \ $ARVBOX_CONTAINER \ /usr/local/lib/arvbox/runsu.sh \ /usr/src/arvados/build/run-tests.sh \ --temp /var/lib/arvados/test \ - WORKSPACE=/usr/src/arvados \ - GEM_HOME=/var/lib/gems \ "$@" - elif echo "$CONFIG" | grep 'dev$' ; then + elif [[ "$CONFIG" = devenv ]] ; then + if [[ $need_setup = 1 ]] ; then + docker_run_dev \ + --detach \ + --name=${ARVBOX_CONTAINER} \ + "--env=SVDIR=/etc/devenv-service" \ + "--volume=$HOME:$HOME:rw" \ + --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \ + arvados/arvbox-dev$TAG + fi + exec docker exec --interactive --tty \ + -e LINES=$(tput lines) \ + -e COLUMNS=$(tput cols) \ + -e TERM=$TERM \ + -e "ARVBOX_HOME=$HOME" \ + -e "DISPLAY=$DISPLAY" \ + --workdir=$PWD \ + ${ARVBOX_CONTAINER} \ + /usr/local/lib/arvbox/devenv.sh "$@" + elif [[ "$CONFIG" =~ dev$ ]] ; then docker_run_dev \ --detach \ --name=$ARVBOX_CONTAINER \ @@ -344,11 +381,11 @@ build() { check() { case "$1" in - localdemo|publicdemo|dev|publicdev|test) + 
localdemo|publicdemo|dev|publicdev|test|devenv) true ;; *) - echo "Argument to $subcmd must be one of localdemo, publicdemo, dev, publicdev, test" + echo "Argument to $subcmd must be one of localdemo, publicdemo, dev, publicdev, test, devenv" exit 1 ;; esac @@ -375,7 +412,7 @@ case "$subcmd" in ;; sh*) - exec docker exec -ti \ + exec docker exec --interactive --tty \ -e LINES=$(tput lines) \ -e COLUMNS=$(tput cols) \ -e TERM=$TERM \ @@ -383,6 +420,17 @@ case "$subcmd" in $ARVBOX_CONTAINER /bin/bash ;; + ash*) + exec docker exec --interactive --tty \ + -e LINES=$(tput lines) \ + -e COLUMNS=$(tput cols) \ + -e TERM=$TERM \ + -e GEM_HOME=/var/lib/gems \ + -u arvbox \ + -w /usr/src/arvados \ + $ARVBOX_CONTAINER /bin/bash --login + ;; + pipe) exec docker exec -i $ARVBOX_CONTAINER /usr/bin/env GEM_HOME=/var/lib/gems /bin/bash - ;; @@ -524,63 +572,36 @@ case "$subcmd" in echo "Certificate copied to $CERT" ;; - devenv) - set -x - if docker ps -a --filter "status=exited" | grep -E "${ARVBOX_CONTAINER}-devenv$" -q ; then - docker start ${ARVBOX_CONTAINER}-devenv - elif ! 
(docker ps -a --filter "status=running" | grep -E "${ARVBOX_CONTAINER}-devenv$" -q) ; then - docker_run_dev \ - --detach \ - --name=${ARVBOX_CONTAINER}-devenv \ - "--env=SVDIR=/etc/devenv-service" \ - "--volume=$HOME:$HOME:rw" \ - --volume=/tmp/.X11-unix:/tmp/.X11-unix:rw \ - arvados/arvbox-dev$TAG - fi - - exec docker exec --interactive --tty \ - -e LINES=$(tput lines) \ - -e COLUMNS=$(tput cols) \ - -e TERM=$TERM \ - -e "ARVBOX_HOME=$HOME" \ - -e "DISPLAY=$DISPLAY" \ - --workdir=$PWD \ - ${ARVBOX_CONTAINER}-devenv \ - /usr/local/lib/arvbox/devenv.sh "$@" - ;; - - devenv-stop) - docker stop ${ARVBOX_CONTAINER}-devenv - ;; - - devenv-reset) - docker stop ${ARVBOX_CONTAINER}-devenv - docker rm ${ARVBOX_CONTAINER}-devenv + psql) + exec docker exec -ti $ARVBOX_CONTAINER bash -c 'PGPASSWORD=$(cat /var/lib/arvados/api_database_pw) exec psql --dbname=arvados_development --host=localhost --username=arvados' ;; *) - echo "Arvados-in-a-box http://arvados.org" + echo "Arvados-in-a-box https://doc.arvados.org/install/arvbox.html" echo - echo "start|run [tag] start $ARVBOX_CONTAINER container" - echo "stop stop arvbox container" - echo "restart stop, then run again" - echo "status print some information about current arvbox" - echo "ip print arvbox docker container ip address" - echo "host print arvbox published host" - echo "shell enter arvbox shell" - echo "open open arvbox workbench in a web browser" - echo "root-cert get copy of root certificate" - echo "update stop, pull latest image, run" - echo "build build arvbox Docker image" - echo "reboot stop, build arvbox Docker image, run" - echo "rebuild build arvbox Docker image, no layer cache" - echo "reset delete arvbox arvados data (be careful!)" - echo "destroy delete all arvbox code and data (be careful!)" - echo "log tail log of specified service" - echo "ls list directories inside arvbox" - echo "cat get contents of files inside arvbox" - echo "pipe run a bash script piped in from stdin" - echo "sv change state of 
service inside arvbox" - echo "clone clone an arvbox" + echo "start|run [tag] start $ARVBOX_CONTAINER container" + echo "stop stop arvbox container" + echo "restart stop, then run again" + echo "status print some information about current arvbox" + echo "ip print arvbox docker container ip address" + echo "host print arvbox published host" + echo "shell enter shell as root" + echo "ashell enter shell as 'arvbox'" + echo "psql enter postgres console" + echo "open open arvbox workbench in a web browser" + echo "root-cert get copy of root certificate" + echo "update stop, pull latest image, run" + echo "build build arvbox Docker image" + echo "reboot stop, build arvbox Docker image, run" + echo "rebuild build arvbox Docker image, no layer cache" + echo "reset delete arvbox arvados data (be careful!)" + echo "destroy delete all arvbox code and data (be careful!)" + echo "log tail log of specified service" + echo "ls list directories inside arvbox" + echo "cat get contents of files inside arvbox" + echo "pipe run a bash script piped in from stdin" + echo "sv " + echo " change state of service inside arvbox" + echo "clone clone dev arvbox" ;; esac diff --git a/tools/arvbox/lib/arvbox/docker/Dockerfile.base b/tools/arvbox/lib/arvbox/docker/Dockerfile.base index 741bd33c49..65171de3d2 100644 --- a/tools/arvbox/lib/arvbox/docker/Dockerfile.base +++ b/tools/arvbox/lib/arvbox/docker/Dockerfile.base @@ -85,6 +85,14 @@ ENV NODEVERSION v8.15.1 RUN curl -L -f https://nodejs.org/dist/${NODEVERSION}/node-${NODEVERSION}-linux-x64.tar.xz | tar -C /usr/local -xJf - && \ ln -s ../node-${NODEVERSION}-linux-x64/bin/node ../node-${NODEVERSION}-linux-x64/bin/npm /usr/local/bin +ENV GRADLEVERSION 5.3.1 + +RUN cd /tmp && \ + curl -L -O https://services.gradle.org/distributions/gradle-${GRADLEVERSION}-bin.zip && \ + unzip gradle-${GRADLEVERSION}-bin.zip -d /usr/local && \ + ln -s ../gradle-${GRADLEVERSION}/bin/gradle /usr/local/bin && \ + rm gradle-${GRADLEVERSION}-bin.zip + # Set UTF-8 
locale RUN echo en_US.UTF-8 UTF-8 > /etc/locale.gen && locale-gen ENV LANG en_US.UTF-8 diff --git a/tools/arvbox/lib/arvbox/docker/createusers.sh b/tools/arvbox/lib/arvbox/docker/createusers.sh index e9721fd55d..c6270457d5 100755 --- a/tools/arvbox/lib/arvbox/docker/createusers.sh +++ b/tools/arvbox/lib/arvbox/docker/createusers.sh @@ -28,10 +28,12 @@ if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then useradd --home-dir /var/lib/arvados/git --uid $HOSTUID --gid $HOSTGID --non-unique git useradd --groups docker crunch - chown arvbox:arvbox -R /usr/local /var/lib/arvados /var/lib/gems \ - /var/lib/passenger /var/lib/postgresql \ - /var/lib/nginx /var/log/nginx /etc/ssl/private \ - /var/lib/gopath /var/lib/pip /var/lib/npm + if [[ "$1" != --no-chown ]] ; then + chown arvbox:arvbox -R /usr/local /var/lib/arvados /var/lib/gems \ + /var/lib/passenger /var/lib/postgresql \ + /var/lib/nginx /var/log/nginx /etc/ssl/private \ + /var/lib/gopath /var/lib/pip /var/lib/npm + fi mkdir -p /var/lib/gems/ruby chown arvbox:arvbox -R /var/lib/gems/ruby diff --git a/tools/arvbox/lib/arvbox/docker/devenv.sh b/tools/arvbox/lib/arvbox/docker/devenv.sh index 9ab3ac4c38..4df5463f1f 100755 --- a/tools/arvbox/lib/arvbox/docker/devenv.sh +++ b/tools/arvbox/lib/arvbox/docker/devenv.sh @@ -3,7 +3,7 @@ # # SPDX-License-Identifier: AGPL-3.0 -flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh +flock /var/lib/arvados/createusers.lock /usr/local/lib/arvbox/createusers.sh --no-chown if [[ -n "$*" ]] ; then exec su --preserve-environment arvbox -c "$*"