*by-sa-3.0.txt
*COPYING
doc/fonts/*
+doc/_includes/_config_default_yml.liquid
doc/user/cwl/federated/*
*/docker_image
docker/jobs/apt.arvados.org*.list
sdk/pam/pam-configs/arvados
sdk/python/tests/data/*
services/api/config/unbound.template
+services/api/config/config.default.yml
services/arv-web/sample-cgi-app/public/.htaccess
services/arv-web/sample-cgi-app/public/index.cgi
services/keepproxy/pkg-extras/etc/default/keepproxy
if [[ "$pkgname" != "arvados-workbench" ]]; then
exclude_list+=('config/database.yml')
fi
+ # for arvados-api-server, we need to dereference the
+ # config/config.default.yml file. There is no fpm way to do that, sadly
+ # (excluding the existing symlink and then adding the file from its source
+ # path doesn't work either).
+ if [[ "$pkgname" == "arvados-api-server" ]]; then
+ mv /arvados/services/api/config/config.default.yml /arvados/services/api/config/config.default.yml.bu
+ cp -p /arvados/lib/config/config.default.yml /arvados/services/api/config/
+ exclude_list+=('config/config.default.yml.bu')
+ fi
for exclude in ${exclude_list[@]}; do
switches+=(-x "$exclude_root/$exclude")
done
-x "$exclude_root/vendor/cache-*" \
-x "$exclude_root/vendor/bundle" "$@" "$license_arg"
rm -rf "$scripts_dir"
+  # Undo the dereferencing we did above
+ if [[ "$pkgname" == "arvados-api-server" ]]; then
+ rm -f /arvados/services/api/config/config.default.yml
+ mv /arvados/services/api/config/config.default.yml.bu /arvados/services/api/config/config.default.yml
+ fi
}
# Build python packages with a virtualenv built-in
admin:
- Topics:
- admin/index.html.textile.liquid
+ - Configuration:
+ - admin/config.html.textile.liquid
- Upgrading and migrations:
- admin/upgrading.html.textile.liquid
+ - admin/config-migration.html.textile.liquid
- install/migrate-docker19.html.textile.liquid
- admin/upgrade-crunch2.html.textile.liquid
- Users and Groups:
--- /dev/null
+../../lib/config/config.default.yml
\ No newline at end of file
--- /dev/null
+---
+layout: default
+navsection: admin
+title: Migrating Configuration
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+Arvados is migrating to a centralized configuration file for all components. The centralized Arvados configuration is @/etc/arvados/config.yml@. Components that support the new centralized configuration are listed below. Components not listed here do not yet support centralized configuration. During the migration period, legacy configuration files will continue to be loaded and take precedence over the centralized configuration file.
+
+h2. API server
+
+The legacy API server configuration is stored in @config/application.yml@ and @config/database.yml@. After migration to @/etc/arvados/config.yml@, both of these files should be moved out of the way and/or deleted.
+
+Change to the API server directory and use the following commands:
+
+<pre>
+$ bundle exec rake config:migrate > config.yml
+$ cp config.yml /etc/arvados/config.yml
+</pre>
+
+This will print the contents of @config.yml@ after merging with legacy @application.yml@. It may then be redirected to a file and copied to @/etc/arvados/config.yml@.
+
+If you wish to update @config.yml@ configuration by hand, or check that everything has been migrated, use @config:diff@ to print configuration items that differ between @application.yml@ and the system @config.yml@.
+
+<pre>
+$ bundle exec rake config:diff
+</pre>
+
+This command will also report if no migrations are required.
+
+h2. crunch-dispatch-slurm
+
+Currently only reads @InstanceTypes@ from centralized configuration. Still requires component-specific configuration file.
+
+h2. keepstore
+
+Currently only reads @RemoteClusters@ from centralized configuration. Still requires component-specific configuration file.
+
+h2. arvados-controller
+
+Only supports centralized config file. No migration needed.
+
+h2. arvados-dispatch-cloud
+
+Only supports centralized config file. No migration needed.
--- /dev/null
+---
+layout: default
+navsection: admin
+title: Configuration reference
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The master Arvados configuration is stored at @/etc/arvados/config.yml@
+
+See "Migrating Configuration":config-migration.html for information about migrating from legacy component-specific configuration files.
+
+{% codeblock as yaml %}
+{% include 'config_default_yml' %}
+{% endcodeblock %}
As part of story "#9945":https://dev.arvados.org/issues/9945, it was discovered that the Centos7 package for libpam-arvados was missing a dependency on the python-pam package, which is available from the EPEL repository. The dependency has been added to the libpam-arvados package. This means that going forward, the EPEL repository will need to be enabled to install libpam-arvados on Centos7.
+h4. New configuration
+
+Arvados is migrating to a centralized configuration file for all components. During the migration, legacy configuration files will continue to be loaded. See "Migrating Configuration":config-migration.html for details.
+
h3. v1.3.0 (2018-12-05)
This release includes several database migrations, which will be executed automatically as part of the API server upgrade. On large Arvados installations, these migrations will take a while. We've seen the upgrade take 30 minutes or more on installations with a lot of collections.
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
+#
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.default.yml
+
+Clusters:
+ xxxxx:
+ SystemRootToken: ""
+
+ # Token to be included in all healthcheck requests. Disabled by default.
+ # Server expects request header of the format "Authorization: Bearer xxx"
+ ManagementToken: ""
+
+ Services:
+ RailsAPI:
+ InternalURLs: {}
+ GitHTTP:
+ InternalURLs: {}
+ ExternalURL: ""
+ Keepstore:
+ InternalURLs: {}
+ Controller:
+ InternalURLs: {}
+ ExternalURL: ""
+ Websocket:
+ InternalURLs: {}
+ ExternalURL: ""
+ Keepbalance:
+ InternalURLs: {}
+      GitSSH:
+        ExternalURL: ""
+ DispatchCloud:
+ InternalURLs: {}
+ SSO:
+ ExternalURL: ""
+ Keepproxy:
+ InternalURLs: {}
+ ExternalURL: ""
+ WebDAV:
+ InternalURLs: {}
+ ExternalURL: ""
+ WebDAVDownload:
+ InternalURLs: {}
+ ExternalURL: ""
+      Composer:
+ ExternalURL: ""
+ WebShell:
+ ExternalURL: ""
+ Workbench1:
+ InternalURLs: {}
+ ExternalURL: ""
+ Workbench2:
+ ExternalURL: ""
+ PostgreSQL:
+ # max concurrent connections per arvados server daemon
+ ConnectionPool: 32
+ Connection:
+ # All parameters here are passed to the PG client library in a connection string;
+ # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+ Host: ""
+ Port: 0
+ User: ""
+ Password: ""
+ DBName: ""
+ API:
+ # Maximum size (in bytes) allowed for a single API request. This
+ # limit is published in the discovery document for use by clients.
+ # Note: You must separately configure the upstream web server or
+ # proxy to actually enforce the desired maximum request size on the
+ # server side.
+ MaxRequestSize: 134217728
+
+ # Limit the number of bytes read from the database during an index
+ # request (by retrieving and returning fewer rows than would
+ # normally be returned in a single response).
+ # Note 1: This setting never reduces the number of returned rows to
+ # zero, no matter how big the first data row is.
+ # Note 2: Currently, this is only checked against a specific set of
+ # columns that tend to get large (collections.manifest_text,
+ # containers.mounts, workflows.definition). Other fields (e.g.,
+ # "properties" hashes) are not counted against this limit.
+ MaxIndexDatabaseRead: 134217728
+
+      # Maximum number of items to return when responding to APIs that
+ # can return partial result sets using limit and offset parameters
+ # (e.g., *.index, groups.contents). If a request specifies a "limit"
+ # parameter higher than this value, this value is used instead.
+ MaxItemsPerResponse: 1000
+
+ # API methods to disable. Disabled methods are not listed in the
+ # discovery document, and respond 404 to all requests.
+ # Example: ["jobs.create", "pipeline_instances.create"]
+ DisabledAPIs: []
+
+ # Interval (seconds) between asynchronous permission view updates. Any
+      # permission-updating API called with the 'async' parameter schedules an
+ # update on the permission view in the future, if not already scheduled.
+ AsyncPermissionsUpdateInterval: 20
+
+ # RailsSessionSecretToken is a string of alphanumeric characters
+ # used by Rails to sign session tokens. IMPORTANT: This is a
+ # site secret. It should be at least 50 characters.
+ RailsSessionSecretToken: ""
+
+ Users:
+ # Config parameters to automatically setup new users. If enabled,
+      # these users will be able to self-activate. Enable this if you want
+ # to run an open instance where anyone can create an account and use
+ # the system without requiring manual approval.
+ #
+      # The AutoSetupNewUsersWith* params are meaningful only when AutoSetupNewUsers is turned on.
+      # AutoSetupUsernameBlacklist is a list of usernames to be blacklisted for auto setup.
+ AutoSetupNewUsers: false
+ AutoSetupNewUsersWithVmUUID: ""
+ AutoSetupNewUsersWithRepository: false
+ AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+ # When new_users_are_active is set to true, new users will be active
+ # immediately. This skips the "self-activate" step which enforces
+ # user agreements. Should only be enabled for development.
+ NewUsersAreActive: false
+
+ # The e-mail address of the user you would like to become marked as an admin
+ # user on their first login.
+ # In the default configuration, authentication happens through the Arvados SSO
+ # server, which uses OAuth2 against Google's servers, so in that case this
+ # should be an address associated with a Google account.
+ AutoAdminUserWithEmail: ""
+
+ # If auto_admin_first_user is set to true, the first user to log in when no
+ # other admin users exist will automatically become an admin user.
+ AutoAdminFirstUser: false
+
+ # Email address to notify whenever a user creates a profile for the
+ # first time
+ UserProfileNotificationAddress: ""
+ AdminNotifierEmailFrom: arvados@example.com
+ EmailSubjectPrefix: "[ARVADOS] "
+ UserNotifierEmailFrom: arvados@example.com
+ NewUserNotificationRecipients: []
+ NewInactiveUserNotificationRecipients: []
+
+ AuditLogs:
+ # Time to keep audit logs, in seconds. (An audit log is a row added
+ # to the "logs" table in the PostgreSQL database each time an
+ # Arvados object is created, modified, or deleted.)
+ #
+ # Currently, websocket event notifications rely on audit logs, so
+      # this should not be set lower than 600 (10 minutes).
+ MaxAge: 1209600
+
+ # Maximum number of log rows to delete in a single SQL transaction.
+ #
+      # If MaxDeleteBatch is 0, log entries will never be
+ # deleted by Arvados. Cleanup can be done by an external process
+ # without affecting any Arvados system processes, as long as very
+ # recent (<5 minutes old) logs are not deleted.
+ #
+ # 100000 is a reasonable batch size for most sites.
+ MaxDeleteBatch: 0
+
+ # Attributes to suppress in events and audit logs. Notably,
+ # specifying ["manifest_text"] here typically makes the database
+ # smaller and faster.
+ #
+ # Warning: Using any non-empty value here can have undesirable side
+ # effects for any client or component that relies on event logs.
+ # Use at your own risk.
+ UnloggedAttributes: []
+
+ SystemLogs:
+ # Maximum characters of (JSON-encoded) query parameters to include
+ # in each request log entry. When params exceed this size, they will
+ # be JSON-encoded, truncated to this size, and logged as
+ # params_truncated.
+ MaxRequestLogParamsSize: 2000
+
+ Collections:
+ # Allow clients to create collections by providing a manifest with
+ # unsigned data blob locators. IMPORTANT: This effectively disables
+ # access controls for data stored in Keep: a client who knows a hash
+ # can write a manifest that references the hash, pass it to
+ # collections.create (which will create a permission link), use
+ # collections.get to obtain a signature for that data locator, and
+ # use that signed locator to retrieve the data from Keep. Therefore,
+ # do not turn this on if your users expect to keep data private from
+ # one another!
+ BlobSigning: true
+
+ # blob_signing_key is a string of alphanumeric characters used to
+ # generate permission signatures for Keep locators. It must be
+ # identical to the permission key given to Keep. IMPORTANT: This is
+ # a site secret. It should be at least 50 characters.
+ #
+ # Modifying blob_signing_key will invalidate all existing
+ # signatures, which can cause programs to fail (e.g., arv-put,
+ # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
+ # no such processes are running.
+ BlobSigningKey: ""
+
+ # Default replication level for collections. This is used when a
+ # collection's replication_desired attribute is nil.
+ DefaultReplication: 2
+
+ # Lifetime (in seconds) of blob permission signatures generated by
+ # the API server. This determines how long a client can take (after
+ # retrieving a collection record) to retrieve the collection data
+ # from Keep. If the client needs more time than that (assuming the
+ # collection still has the same content and the relevant user/token
+ # still has permission) the client can retrieve the collection again
+ # to get fresh signatures.
+ #
+ # This must be exactly equal to the -blob-signature-ttl flag used by
+ # keepstore servers. Otherwise, reading data blocks and saving
+ # collections will fail with HTTP 403 permission errors.
+ #
+ # Modifying blob_signature_ttl invalidates existing signatures; see
+ # blob_signing_key note above.
+ #
+ # The default is 2 weeks.
+ BlobSigningTTL: 1209600
+
+ # Default lifetime for ephemeral collections: 2 weeks. This must not
+ # be less than blob_signature_ttl.
+ DefaultTrashLifetime: 1209600
+
+ # Interval (seconds) between trash sweeps. During a trash sweep,
+ # collections are marked as trash if their trash_at time has
+ # arrived, and deleted if their delete_at time has arrived.
+ TrashSweepInterval: 60
+
+ # If true, enable collection versioning.
+ # When a collection's preserve_version field is true or the current version
+ # is older than the amount of seconds defined on preserve_version_if_idle,
+ # a snapshot of the collection's previous state is created and linked to
+ # the current collection.
+ CollectionVersioning: false
+
+ # 0 = auto-create a new version on every update.
+ # -1 = never auto-create new versions.
+ # > 0 = auto-create a new version when older than the specified number of seconds.
+ PreserveVersionIfIdle: -1
+
+ Login:
+ # These settings are provided by your OAuth2 provider (e.g.,
+ # sso-provider).
+ ProviderAppSecret: ""
+ ProviderAppID: ""
+
+ Git:
+ # Git repositories must be readable by api server, or you won't be
+ # able to submit crunch jobs. To pass the test suites, put a clone
+ # of the arvados tree in {git_repositories_dir}/arvados.git or
+ # {git_repositories_dir}/arvados/.git
+ Repositories: /var/lib/arvados/git/repositories
+
+ TLS:
+ Insecure: false
+
+ Containers:
+ # List of supported Docker Registry image formats that compute nodes
+ # are able to use. `arv keep docker` will error out if a user tries
+ # to store an image with an unsupported format. Use an empty array
+ # to skip the compatibility check (and display a warning message to
+ # that effect).
+ #
+ # Example for sites running docker < 1.10: ["v1"]
+ # Example for sites running docker >= 1.10: ["v2"]
+ # Example for disabling check: []
+ SupportedDockerImageFormats: ["v2"]
+
+ # Include details about job reuse decisions in the server log. This
+ # causes additional database queries to run, so it should not be
+ # enabled unless you expect to examine the resulting logs for
+ # troubleshooting purposes.
+ LogReuseDecisions: false
+
+ # Default value for keep_cache_ram of a container's runtime_constraints.
+ DefaultKeepCacheRAM: 268435456
+
+ # Number of times a container can be unlocked before being
+ # automatically cancelled.
+ MaxDispatchAttempts: 5
+
+ # Default value for container_count_max for container requests. This is the
+ # number of times Arvados will create a new container to satisfy a container
+ # request. If a container is cancelled it will retry a new container if
+ # container_count < container_count_max on any container requests associated
+ # with the cancelled container.
+ MaxRetryAttempts: 3
+
+ # The maximum number of compute nodes that can be in use simultaneously
+ # If this limit is reduced, any existing nodes with slot number >= new limit
+ # will not be counted against the new limit. In other words, the new limit
+ # won't be strictly enforced until those nodes with higher slot numbers
+ # go down.
+ MaxComputeVMs: 64
+
+ # Preemptible instance support (e.g. AWS Spot Instances)
+ # When true, child containers will get created with the preemptible
+      # scheduling parameter set.
+ UsePreemptibleInstances: false
+
+ Logging:
+ # When you run the db:delete_old_container_logs task, it will find
+ # containers that have been finished for at least this many seconds,
+ # and delete their stdout, stderr, arv-mount, crunch-run, and
+ # crunchstat logs from the logs table.
+ MaxAge: 720h
+
+ # These two settings control how frequently log events are flushed to the
+ # database. Log lines are buffered until either crunch_log_bytes_per_event
+ # has been reached or crunch_log_seconds_between_events has elapsed since
+ # the last flush.
+ LogBytesPerEvent: 4096
+ LogSecondsBetweenEvents: 1
+
+ # The sample period for throttling logs, in seconds.
+ LogThrottlePeriod: 60
+
+ # Maximum number of bytes that job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ LogThrottleBytes: 65536
+
+ # Maximum number of lines that job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ LogThrottleLines: 1024
+
+ # Maximum bytes that may be logged by a single job. Log bytes that are
+ # silenced by throttling are not counted against this total.
+ LimitLogBytesPerJob: 67108864
+
+ LogPartialLineThrottlePeriod: 5
+
+ # Container logs are written to Keep and saved in a collection,
+ # which is updated periodically while the container runs. This
+ # value sets the interval (given in seconds) between collection
+ # updates.
+ LogUpdatePeriod: 1800
+
+ # The log collection is also updated when the specified amount of
+ # log data (given in bytes) is produced in less than one update
+ # period.
+ LogUpdateSize: 33554432
+
+ SLURM:
+ Managed:
+ # Path to dns server configuration directory
+ # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+ # files or touch restart.txt (see below).
+ DNSServerConfDir: ""
+
+ # Template file for the dns server host snippets. See
+ # unbound.template in this directory for an example. If false, do
+ # not write any config files.
+ DNSServerConfTemplate: ""
+
+ # String to write to {dns_server_conf_dir}/restart.txt (with a
+ # trailing newline) after updating local data. If false, do not
+ # open or write the restart.txt file.
+ DNSServerReloadCommand: ""
+
+ # Command to run after each DNS update. Template variables will be
+ # substituted; see the "unbound" example below. If false, do not run
+ # a command.
+ DNSServerUpdateCommand: ""
+
+ ComputeNodeDomain: ""
+ ComputeNodeNameservers:
+ - 192.168.1.1
+
+ # Hostname to assign to a compute node when it sends a "ping" and the
+ # hostname in its Node record is nil.
+ # During bootstrapping, the "ping" script is expected to notice the
+ # hostname given in the ping response, and update its unix hostname
+ # accordingly.
+ # If false, leave the hostname alone (this is appropriate if your compute
+ # nodes' hostnames are already assigned by some other mechanism).
+ #
+ # One way or another, the hostnames of your node records should agree
+ # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+ #
+ # Example for compute0000, compute0001, ....:
+ # assign_node_hostname: compute%<slot_number>04d
+ # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+ AssignNodeHostname: "compute%<slot_number>d"
+
+ JobsAPI:
+ # Enable the legacy Jobs API. This value must be a string.
+ # 'auto' -- (default) enable the Jobs API only if it has been used before
+ # (i.e., there are job records in the database)
+ # 'true' -- enable the Jobs API despite lack of existing records.
+ # 'false' -- disable the Jobs API despite presence of existing records.
+ Enable: 'auto'
+
+ # Git repositories must be readable by api server, or you won't be
+ # able to submit crunch jobs. To pass the test suites, put a clone
+ # of the arvados tree in {git_repositories_dir}/arvados.git or
+ # {git_repositories_dir}/arvados/.git
+ GitInternalDir: /var/lib/arvados/internal.git
+
+ # Docker image to be used when none found in runtime_constraints of a job
+ DefaultDockerImage: ""
+
+ # none or slurm_immediate
+ CrunchJobWrapper: none
+
+ # username, or false = do not set uid when running jobs.
+ CrunchJobUser: crunch
+
+ # The web service must be able to create/write this file, and
+ # crunch-job must be able to stat() it.
+ CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+ # Control job reuse behavior when two completed jobs match the
+ # search criteria and have different outputs.
+ #
+ # If true, in case of a conflict, reuse the earliest job (this is
+ # similar to container reuse behavior).
+ #
+ # If false, in case of a conflict, do not reuse any completed job,
+ # but do reuse an already-running job if available (this is the
+ # original job reuse behavior, and is still the default).
+ ReuseJobIfOutputsDiffer: false
+
+ Mail:
+ MailchimpAPIKey: ""
+ MailchimpListID: ""
+ SendUserSetupNotificationEmail: ""
+ IssueReporterEmailFrom: ""
+ IssueReporterEmailTo: ""
+ SupportEmailAddress: ""
+ EmailFrom: ""
+ RemoteClusters:
+ "*":
+ Proxy: false
+ ActivateUsers: false
before_action(:render_404_if_no_object,
except: [:index, :create] + ERROR_ACTIONS)
- theme Rails.configuration.arvados_theme
-
attr_writer :resource_attrs
begin
def default_url_options
options = {}
- if Rails.configuration.host
- options[:host] = Rails.configuration.host
- end
- if Rails.configuration.port
- options[:port] = Rails.configuration.port
- end
- if Rails.configuration.protocol
- options[:protocol] = Rails.configuration.protocol
+ if Rails.configuration.Services.Controller.ExternalURL != ""
+ exturl = URI.parse(Rails.configuration.Services.Controller.ExternalURL)
+ options[:host] = exturl.host
+ options[:port] = exturl.port
+ options[:protocol] = exturl.scheme
end
options
end
limit_query.each do |record|
new_limit += 1
read_total += record.read_length.to_i
- if read_total >= Rails.configuration.max_index_database_read
+ if read_total >= Rails.configuration.API.MaxIndexDatabaseRead
new_limit -= 1 if new_limit > 1
@limit = new_limit
break
end
def disable_api_methods
- if Rails.configuration.disable_api_methods.
- include?(controller_name + "." + action_name)
+ if Rails.configuration.API.DisabledAPIs.include?(controller_name + "." + action_name)
send_error("Disabled", status: 404)
end
end
table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
- disabled_methods = Rails.configuration.disable_api_methods
+ disabled_methods = Rails.configuration.API.DisabledAPIs
avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')}
klasses = avail_klasses.keys
mgmt_token = Rails.configuration.ManagementToken
auth_header = request.headers['Authorization']
- if !mgmt_token
+ if mgmt_token == ""
send_json ({"errors" => "disabled"}), status: 404
elsif !auth_header
send_json ({"errors" => "authorization required"}), status: 401
def discovery_doc
Rails.cache.fetch 'arvados_v1_rest_discovery' do
Rails.application.eager_load!
+ remoteHosts = {}
+ Rails.configuration.RemoteClusters.each {|k,v| if k != "*" then remoteHosts[k] = v["Host"] end }
discovery = {
kind: "discovery#restDescription",
discoveryVersion: "v1",
title: "Arvados API",
description: "The API to interact with Arvados.",
documentationLink: "http://doc.arvados.org/api/index.html",
- defaultCollectionReplication: Rails.configuration.default_collection_replication,
+ defaultCollectionReplication: Rails.configuration.Collections.DefaultReplication,
protocol: "rest",
baseUrl: root_url + "arvados/v1/",
basePath: "/arvados/v1/",
rootUrl: root_url,
servicePath: "arvados/v1/",
batchPath: "batch",
- uuidPrefix: Rails.application.config.uuid_prefix,
- defaultTrashLifetime: Rails.application.config.default_trash_lifetime,
- blobSignatureTtl: Rails.application.config.blob_signature_ttl,
- maxRequestSize: Rails.application.config.max_request_size,
- maxItemsPerResponse: Rails.application.config.max_items_per_response,
- dockerImageFormats: Rails.application.config.docker_image_formats,
- crunchLogBytesPerEvent: Rails.application.config.crunch_log_bytes_per_event,
- crunchLogSecondsBetweenEvents: Rails.application.config.crunch_log_seconds_between_events,
- crunchLogThrottlePeriod: Rails.application.config.crunch_log_throttle_period,
- crunchLogThrottleBytes: Rails.application.config.crunch_log_throttle_bytes,
- crunchLogThrottleLines: Rails.application.config.crunch_log_throttle_lines,
- crunchLimitLogBytesPerJob: Rails.application.config.crunch_limit_log_bytes_per_job,
- crunchLogPartialLineThrottlePeriod: Rails.application.config.crunch_log_partial_line_throttle_period,
- crunchLogUpdatePeriod: Rails.application.config.crunch_log_update_period,
- crunchLogUpdateSize: Rails.application.config.crunch_log_update_size,
- remoteHosts: Rails.configuration.remote_hosts,
- remoteHostsViaDNS: Rails.configuration.remote_hosts_via_dns,
- websocketUrl: Rails.application.config.websocket_address,
- workbenchUrl: Rails.application.config.workbench_address,
- keepWebServiceUrl: Rails.application.config.keep_web_service_url,
- gitUrl: case Rails.application.config.git_repo_https_base
- when false
- ''
- when true
- 'https://git.%s.arvadosapi.com/' % Rails.configuration.uuid_prefix
- else
- Rails.application.config.git_repo_https_base
- end,
+ uuidPrefix: Rails.configuration.ClusterID,
+ defaultTrashLifetime: Rails.configuration.Collections.DefaultTrashLifetime,
+ blobSignatureTtl: Rails.configuration.Collections.BlobSigningTTL,
+ maxRequestSize: Rails.configuration.API.MaxRequestSize,
+ maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse,
+ dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats,
+ crunchLogBytesPerEvent: Rails.configuration.Containers.Logging.LogBytesPerEvent,
+ crunchLogSecondsBetweenEvents: Rails.configuration.Containers.Logging.LogSecondsBetweenEvents,
+ crunchLogThrottlePeriod: Rails.configuration.Containers.Logging.LogThrottlePeriod,
+ crunchLogThrottleBytes: Rails.configuration.Containers.Logging.LogThrottleBytes,
+ crunchLogThrottleLines: Rails.configuration.Containers.Logging.LogThrottleLines,
+ crunchLimitLogBytesPerJob: Rails.configuration.Containers.Logging.LimitLogBytesPerJob,
+ crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod,
+ crunchLogUpdatePeriod: Rails.configuration.Containers.Logging.LogUpdatePeriod,
+ crunchLogUpdateSize: Rails.configuration.Containers.Logging.LogUpdateSize,
+ remoteHosts: remoteHosts,
+ remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"].Proxy,
+ websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s,
+ workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,
+ keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,
+ gitUrl: Rails.configuration.Services.GitHTTP.ExternalURL.to_s,
parameters: {
alt: {
type: "string",
end
end
end
- Rails.configuration.disable_api_methods.each do |method|
+ Rails.configuration.API.DisabledAPIs.each do |method|
ctrl, action = method.split('.', 2)
discovery[:resources][ctrl][:methods].delete(action.to_sym)
end
def home
respond_to do |f|
f.html do
- if Rails.configuration.workbench_address
- redirect_to Rails.configuration.workbench_address
+ if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.empty?
+ redirect_to Rails.configuration.Services.Workbench1.ExternalURL
else
render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
end
:first_name => omniauth['info']['first_name'],
:last_name => omniauth['info']['last_name'],
:identity_url => omniauth['info']['identity_url'],
- :is_active => Rails.configuration.new_users_are_active,
+ :is_active => Rails.configuration.Users.NewUsersAreActive,
:owner_uuid => system_user_uuid)
if omniauth['info']['username']
user.set_initial_username(requested: omniauth['info']['username'])
flash[:notice] = 'You have logged off'
return_to = params[:return_to] || root_url
- redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+ redirect_to "#{Rails.configuration.Services.SSO.ExternalURL}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
end
# login - Just bounce to /auth/joshid. The only purpose of this function is
class AdminNotifier < ActionMailer::Base
include AbstractController::Callbacks
- default from: Rails.configuration.admin_notifier_email_from
+ default from: Rails.configuration.Users.AdminNotifierEmailFrom
def new_user(user)
@user = user
- if not Rails.configuration.new_user_notification_recipients.empty? then
- @recipients = Rails.configuration.new_user_notification_recipients
+ if not Rails.configuration.Users.NewUserNotificationRecipients.empty? then
+ @recipients = Rails.configuration.Users.NewUserNotificationRecipients
logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
add_to_subject = ''
- if Rails.configuration.auto_setup_new_users
+ if Rails.configuration.Users.AutoSetupNewUsers
add_to_subject = @user.is_invited ? ' and setup' : ', but not setup'
end
mail(to: @recipients,
- subject: "#{Rails.configuration.email_subject_prefix}New user created#{add_to_subject} notification"
+ subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New user created#{add_to_subject} notification"
)
end
end
def new_inactive_user(user)
@user = user
- if not Rails.configuration.new_inactive_user_notification_recipients.empty? then
- @recipients = Rails.configuration.new_inactive_user_notification_recipients
+ if not Rails.configuration.Users.NewInactiveUserNotificationRecipients.empty? then
+ @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients
logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
mail(to: @recipients,
- subject: "#{Rails.configuration.email_subject_prefix}New inactive user notification"
+ subject: "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification"
)
end
end
# SPDX-License-Identifier: AGPL-3.0
class ProfileNotifier < ActionMailer::Base
- default from: Rails.configuration.admin_notifier_email_from
+ default from: Rails.configuration.Users.AdminNotifierEmailFrom
def profile_created(user, address)
@user = user
class UserNotifier < ActionMailer::Base
include AbstractController::Callbacks
- default from: Rails.configuration.user_notifier_email_from
+ default from: Rails.configuration.Users.UserNotifierEmailFrom
def account_is_setup(user)
@user = user
end
def self.remote_host(uuid_prefix:)
- Rails.configuration.remote_hosts[uuid_prefix] ||
- (Rails.configuration.remote_hosts_via_dns &&
+ Rails.configuration.RemoteClusters[uuid_prefix].Host ||
+ (Rails.configuration.RemoteClusters["*"].Proxy &&
uuid_prefix+".arvadosapi.com")
end
def self.validate(token:, remote: nil)
return nil if !token
- remote ||= Rails.configuration.uuid_prefix
+ remote ||= Rails.configuration.ClusterID
case token[0..2]
when 'v2/'
end
uuid_prefix = uuid[0..4]
- if uuid_prefix == Rails.configuration.uuid_prefix
+ if uuid_prefix == Rails.configuration.ClusterID
# If the token were valid, we would have validated it above
return nil
elsif uuid_prefix.length != 5
# [re]validate it.
begin
clnt = HTTPClient.new
- if Rails.configuration.sso_insecure
+ if Rails.configuration.TLS.Insecure
clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
else
# Use system CA certificates
end
remote_user = SafeJSON.load(
clnt.get_content('https://' + host + '/arvados/v1/users/current',
- {'remote' => Rails.configuration.uuid_prefix},
+ {'remote' => Rails.configuration.ClusterID},
{'Authorization' => 'Bearer ' + token}))
rescue => e
Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
end
end
- if Rails.configuration.new_users_are_active ||
- Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
+ if Rails.configuration.Users.NewUsersAreActive ||
+ Rails.configuration.RemoteClusters[remote_user['uuid'][0..4]].andand["ActivateUsers"]
# Update is_active to whatever it is at the remote end
user.is_active = remote_user['is_active']
elsif !remote_user['is_active']
end
def logged_attributes
- attributes.except(*Rails.configuration.unlogged_attributes)
+ attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes)
end
def self.full_text_searchable_columns
end
def self.uuid_like_pattern
- "#{Rails.configuration.uuid_prefix}-#{uuid_prefix}-_______________"
+ "#{Rails.configuration.ClusterID}-#{uuid_prefix}-_______________"
end
def self.uuid_regex
end
def is_audit_logging_enabled?
- return !(Rails.configuration.max_audit_log_age.to_i == 0 &&
- Rails.configuration.max_audit_log_delete_batch.to_i > 0)
+ return !(Rails.configuration.AuditLogs.MaxAge.to_i == 0 &&
+ Rails.configuration.AuditLogs.MaxDeleteBatch.to_i > 0)
end
def log_start_state
timestamp = opts[:expire]
else
timestamp = db_current_time.to_i +
- (opts[:ttl] || Rails.configuration.blob_signature_ttl)
+ (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL)
end
timestamp_hex = timestamp.to_s(16)
# => "53163cb4"
- blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+ blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
# Generate a signature.
signature =
- generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+ generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),
blob_hash, opts[:api_token], timestamp_hex, blob_signature_ttl)
blob_locator + '+A' + signature + '@' + timestamp_hex
if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)
raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
end
- blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+ blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_s(16)
my_signature =
- generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+ generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),
blob_hash, opts[:api_token], timestamp, blob_signature_ttl)
if my_signature != given_signature
# Signature provided, but verify_signature did not like it.
logger.warn "Invalid signature on locator #{tok}"
raise ArvadosModel::PermissionDeniedError
- elsif Rails.configuration.permit_create_collection_with_unsigned_manifest
+ elsif !Rails.configuration.Collections.BlobSigning
# No signature provided, but we are running in insecure mode.
logger.debug "Missing signature on locator #{tok} ignored"
elsif Blob.new(tok).empty?
end
def should_preserve_version?
- return false unless (Rails.configuration.collection_versioning && versionable_updates?(self.changes.keys))
+ return false unless (Rails.configuration.Collections.CollectionVersioning && versionable_updates?(self.changes.keys))
- idle_threshold = Rails.configuration.preserve_version_if_idle
+ idle_threshold = Rails.configuration.Collections.PreserveVersionIfIdle
if !self.preserve_version_was &&
(idle_threshold < 0 ||
(idle_threshold > 0 && self.modified_at_was > db_current_time-idle_threshold.seconds))
return manifest_text
else
token = Thread.current[:token]
- exp = [db_current_time.to_i + Rails.configuration.blob_signature_ttl,
+ exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL,
trash_at].compact.map(&:to_i).min
self.class.sign_manifest manifest_text, token, exp
end
def self.sign_manifest manifest, token, exp=nil
if exp.nil?
- exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+ exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
end
signing_opts = {
api_token: token,
#
# If filter_compatible_format is true (the default), only return image
# collections which are support by the installation as indicated by
- # Rails.configuration.docker_image_formats. Will follow
+ # Rails.configuration.Containers.SupportedDockerImageFormats. Will follow
# 'docker_image_migration' links if search_term resolves to an incompatible
# image, but an equivalent compatible image is available.
def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil, filter_compatible_format: true)
joins("JOIN collections ON links.head_uuid = collections.uuid").
order("links.created_at DESC")
- if (Rails.configuration.docker_image_formats.include? 'v1' and
- Rails.configuration.docker_image_formats.include? 'v2') or filter_compatible_format == false
+ docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats
+
+ if (docker_image_formats.include? 'v1' and
+ docker_image_formats.include? 'v2') or filter_compatible_format == false
pattern = /^(sha256:)?[0-9A-Fa-f]{64}\.tar$/
- elsif Rails.configuration.docker_image_formats.include? 'v2'
+ elsif docker_image_formats.include? 'v2'
pattern = /^(sha256:)[0-9A-Fa-f]{64}\.tar$/
- elsif Rails.configuration.docker_image_formats.include? 'v1'
+ elsif docker_image_formats.include? 'v1'
pattern = /^[0-9A-Fa-f]{64}\.tar$/
else
- raise "Unrecognized configuration for docker_image_formats #{Rails.configuration.docker_image_formats}"
+ raise "Unrecognized configuration for docker_image_formats #{docker_image_formats}"
end
# If the search term is a Collection locator that contains one file
if loc = Keep::Locator.parse(search_term)
loc.strip_hints!
coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1)
- if coll_match.any? or Rails.configuration.remote_hosts.length == 0
+ rc = Rails.configuration.RemoteClusters.select{ |k|
+ k != :"*" && k != Rails.configuration.ClusterID}
+ if coll_match.any? or rc.length == 0
return get_compatible_images(readers, pattern, coll_match)
else
# Allow bare pdh that doesn't exist in the local database so
unless src_gitdir
raise ArgumentError.new "no local repository for #{repo_name}"
end
- dst_gitdir = Rails.configuration.git_internal_dir
+ dst_gitdir = Rails.configuration.Containers.JobsAPI.GitInternalDir
begin
commit_in_dst = must_git(dst_gitdir, "log -n1 --format=%H #{sha1.shellescape}^{commit}").strip
protected
def ask_git_whether_is
- @gitdirbase = Rails.configuration.git_repositories_dir
+ @gitdirbase = Rails.configuration.Git.Repositories
self.is = nil
Dir.foreach @gitdirbase do |repo|
next if repo.match(/^\./)
rc = {}
defaults = {
'keep_cache_ram' =>
- Rails.configuration.container_default_keep_cache_ram,
+ Rails.configuration.Containers.DefaultKeepCacheRAM,
}
defaults.merge(runtime_constraints).each do |k, v|
if v.is_a? Array
transaction do
reload(lock: 'FOR UPDATE')
check_unlock_fail
- if self.lock_count < Rails.configuration.max_container_dispatch_attempts
+ if self.lock_count < Rails.configuration.Containers.MaxDispatchAttempts
update_attributes!(state: Queued)
else
update_attributes!(state: Cancelled,
self.mounts ||= {}
self.secret_mounts ||= {}
self.cwd ||= "."
- self.container_count_max ||= Rails.configuration.container_count_max
+ self.container_count_max ||= Rails.configuration.Containers.MaxRetryAttempts
self.scheduling_parameters ||= {}
self.output_ttl ||= 0
self.priority ||= 0
if self.state == Committed
# If preemptible instances (eg: AWS Spot Instances) are allowed,
# ask them on child containers by default.
- if Rails.configuration.preemptible_instances and !c.nil? and
+ if Rails.configuration.Containers.UsePreemptibleInstances and !c.nil? and
self.scheduling_parameters['preemptible'].nil?
self.scheduling_parameters['preemptible'] = true
end
scheduling_parameters['partitions'].size)
errors.add :scheduling_parameters, "partitions must be an array of strings"
end
- if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible']
+ if !Rails.configuration.Containers.UsePreemptibleInstances and scheduling_parameters['preemptible']
errors.add :scheduling_parameters, "preemptible instances are not allowed"
end
if scheduling_parameters.include? 'max_run_time' and
log_reuse_info { "job #{j.uuid} has nil output" }
elsif j.log.nil?
log_reuse_info { "job #{j.uuid} has nil log" }
- elsif Rails.configuration.reuse_job_if_outputs_differ
+ elsif Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer
if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
# Ignore: keep looking for an incomplete job or one whose
# output is readable.
end
def find_docker_image_locator
- if runtime_constraints.is_a? Hash
+ if runtime_constraints.is_a? Hash and Rails.configuration.Containers.JobsAPI.DefaultDockerImage != ""
runtime_constraints['docker_image'] ||=
- Rails.configuration.default_docker_image_for_jobs
+ Rails.configuration.Containers.JobsAPI.DefaultDockerImage
end
resolve_runtime_constraint("docker_image",
def trigger_crunch_dispatch_if_cancelled
if @need_crunch_dispatch_trigger
- File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
+ File.open(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger, 'wb') do
# That's all, just create/touch a file for crunch-job to see.
end
end
api_accessible :superuser, :extend => :user do |t|
t.add :first_ping_at
t.add :info
- t.add lambda { |x| Rails.configuration.compute_node_nameservers }, :as => :nameservers
+ t.add lambda { |x| Rails.configuration.Containers.SLURM.Managed.ComputeNodeNameservers }, :as => :nameservers
end
after_initialize do
end
def domain
- super || Rails.configuration.compute_node_domain
+ super || Rails.configuration.Containers.SLURM.Managed.ComputeNodeDomain
end
def api_job_uuid
protected
def assign_hostname
- if self.hostname.nil? and Rails.configuration.assign_node_hostname
+ if self.hostname.nil? and Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname
self.hostname = self.class.hostname_for_slot(self.slot_number)
end
end
# query label:
'Node.available_slot_number',
# [col_id, val] for $1 vars:
- [[nil, Rails.configuration.max_compute_nodes]],
+ [[nil, Rails.configuration.Containers.MaxComputeVMs]],
).rows.first.andand.first
end
template_vars = {
hostname: hostname,
- uuid_prefix: Rails.configuration.uuid_prefix,
+ uuid_prefix: Rails.configuration.ClusterID,
ip_address: ip_address,
ptr_domain: ptr_domain,
}
- if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template
+ if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
+ !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty?)
tmpfile = nil
begin
begin
- template = IO.read(Rails.configuration.dns_server_conf_template)
+ template = IO.read(Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate)
rescue IOError, SystemCallError => e
- logger.error "Reading #{Rails.configuration.dns_server_conf_template}: #{e.message}"
+ logger.error "Reading #{Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate}: #{e.message}"
raise
end
- hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+ hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf"
Tempfile.open(["#{hostname}-", ".conf.tmp"],
- Rails.configuration.dns_server_conf_dir) do |f|
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir) do |f|
tmpfile = f.path
f.puts template % template_vars
end
end
end
- if Rails.configuration.dns_server_update_command
- cmd = Rails.configuration.dns_server_update_command % template_vars
+ if !Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand.empty?
+ cmd = Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand % template_vars
if not system cmd
logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}"
ok = false
end
end
- if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_reload_command
- restartfile = File.join(Rails.configuration.dns_server_conf_dir, 'restart.txt')
+ if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
+ !Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand.to_s.empty?)
+ restartfile = File.join(Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, 'restart.txt')
begin
File.open(restartfile, 'w') do |f|
# Typically, this is used to trigger a dns server restart
- f.puts Rails.configuration.dns_server_reload_command
+ f.puts Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand
end
rescue IOError, SystemCallError => e
logger.error "Unable to write #{restartfile}: #{e.message}"
end
def self.hostname_for_slot(slot_number)
- config = Rails.configuration.assign_node_hostname
+ config = Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname
return nil if !config
# At startup, make sure all DNS entries exist. Otherwise, slurmctld
# will refuse to start.
- if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template and Rails.configuration.assign_node_hostname
- (0..Rails.configuration.max_compute_nodes-1).each do |slot_number|
+ if (!Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir.to_s.empty? and
+ !Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate.to_s.empty? and
+ !Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname.to_s.empty?)
+
+ (0..Rails.configuration.Containers.MaxComputeVMs-1).each do |slot_number|
hostname = hostname_for_slot(slot_number)
- hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+ hostfile = File.join Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir, "#{hostname}.conf"
if !File.exist? hostfile
n = Node.where(:slot_number => slot_number).first
if n.nil? or n.ip_address.nil?
# prefers bare repositories over checkouts.
[["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args|
[:uuid, :name].each do |path_attr|
- git_dir = File.join(Rails.configuration.git_repositories_dir,
+ git_dir = File.join(Rails.configuration.Git.Repositories,
repo_base % send(path_attr), *join_args)
return git_dir if File.exist?(git_dir)
end
end
def ssh_clone_url
- _clone_url :git_repo_ssh_base, 'git@git.%s.arvadosapi.com:'
+ _clone_url Rails.configuration.Services.GitSSH.andand.ExternalURL, 'ssh://git@git.%s.arvadosapi.com'
end
def https_clone_url
- _clone_url :git_repo_https_base, 'https://git.%s.arvadosapi.com/'
+ _clone_url Rails.configuration.Services.GitHTTP.andand.ExternalURL, 'https://git.%s.arvadosapi.com/'
end
def _clone_url config_var, default_base_fmt
- configured_base = Rails.configuration.send config_var
- return nil if configured_base == false
- prefix = new_record? ? Rails.configuration.uuid_prefix : uuid[0,5]
- if prefix == Rails.configuration.uuid_prefix and configured_base != true
- base = configured_base
+ if not config_var
+ return ""
+ end
+ prefix = new_record? ? Rails.configuration.ClusterID : uuid[0,5]
+ if prefix == Rails.configuration.ClusterID and config_var != URI("")
+ base = config_var
+ else
+ base = URI(default_base_fmt % prefix)
+ end
+ if base.scheme == "ssh"
+ '%s@%s:%s.git' % [base.user, base.host, name]
else
- base = default_base_fmt % prefix
+ '%s%s.git' % [base, name]
end
- '%s%s.git' % [base, name]
end
end
after_create :add_system_group_permission_link
after_create :invalidate_permissions_cache
after_create :auto_setup_new_user, :if => Proc.new { |user|
- Rails.configuration.auto_setup_new_users and
+ Rails.configuration.Users.AutoSetupNewUsers and
(user.uuid != system_user_uuid) and
(user.uuid != anonymous_user_uuid)
}
def is_invited
!!(self.is_active ||
- Rails.configuration.new_users_are_active ||
+ Rails.configuration.Users.NewUsersAreActive ||
self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)
end
current_user.andand.is_admin or
(self == current_user &&
self.redirect_to_user_uuid.nil? &&
- self.is_active == Rails.configuration.new_users_are_active)
+ self.is_active == Rails.configuration.Users.NewUsersAreActive)
end
def check_auto_admin
return if self.uuid.end_with?('anonymouspublic')
if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and
- Rails.configuration.auto_admin_user and self.email == Rails.configuration.auto_admin_user) or
+ !Rails.configuration.Users.AutoAdminUserWithEmail.empty? and self.email == Rails.configuration.Users.AutoAdminUserWithEmail) or
(User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and
- Rails.configuration.auto_admin_first_user)
+ Rails.configuration.Users.AutoAdminFirstUser)
self.is_admin = true
self.is_active = true
end
quoted_name = self.class.connection.quote_string(basename)
next_username = basename
next_suffix = 1
- while Rails.configuration.auto_setup_name_blacklist.include?(next_username)
+ while Rails.configuration.Users.AutoSetupUsernameBlacklist.include?(next_username)
next_suffix += 1
next_username = "%s%i" % [basename, next_suffix]
end
# create login permission for the given vm_uuid, if it does not already exist
def create_vm_login_permission_link(vm_uuid, repo_name)
# vm uuid is optional
- return if !vm_uuid
+ return if vm_uuid == ""
vm = VirtualMachine.where(uuid: vm_uuid).first
if !vm
def auto_setup_new_user
setup(openid_prefix: Rails.configuration.default_openid_prefix)
if username
- create_vm_login_permission_link(Rails.configuration.auto_setup_new_users_with_vm_uuid,
+ create_vm_login_permission_link(Rails.configuration.Users.AutoSetupNewUsersWithVmUUID,
username)
repo_name = "#{username}/#{username}"
- if Rails.configuration.auto_setup_new_users_with_repository and
+ if Rails.configuration.Users.AutoSetupNewUsersWithRepository and
Repository.where(name: repo_name).first.nil?
repo = Repository.create!(name: repo_name, owner_uuid: uuid)
Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
def send_profile_created_notification
if self.prefs_changed?
if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
- profile_notification_address = Rails.configuration.user_profile_notification_address
+ profile_notification_address = Rails.configuration.Users.UserProfileNotificationAddress
ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address
end
end
<%= @user.full_name %> <<%= @user.email %>>
-<% if Rails.configuration.workbench_address -%>
+<% if Rails.configuration.Services.Workbench1.ExternalURL -%>
Please see workbench for more information:
- <%= Rails.configuration.workbench_address %>
+ <%= Rails.configuration.Services.Workbench1.ExternalURL %>
<% end -%>
Thanks,
<%
add_to_message = ''
- if Rails.configuration.auto_setup_new_users
+ if Rails.configuration.Users.AutoSetupNewUsers
add_to_message = @user.is_invited ? ' and setup' : ', but not setup'
end
%>
This user is <%= @user.is_active ? '' : 'NOT ' %>active.
-<% if Rails.configuration.workbench_address -%>
+<% if Rails.configuration.Services.Workbench1.ExternalURL -%>
Please see workbench for more information:
- <%= Rails.configuration.workbench_address %>
+ <%= Rails.configuration.Services.Workbench1.ExternalURL %>
<% end -%>
Thanks,
Your friendly Arvados robot.
-
Hi there,
<% end -%>
-Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.workbench_address %>at
+Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.Services.Workbench1.ExternalURL %>at
- <%= Rails.configuration.workbench_address %><%= "/" if !Rails.configuration.workbench_address.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
+ <%= Rails.configuration.Services.Workbench1.ExternalURL %><%= "/" if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
for connection instructions.
# 5. Section in application.default.yml called "common"
common:
- ###
- ### Essential site configuration
- ###
-
- # The prefix used for all database identifiers to identify the record as
- # originating from this site. Must be exactly 5 alphanumeric characters
- # (lowercase ASCII letters and digits).
- uuid_prefix: ~
-
- # secret_token is a string of alphanumeric characters used by Rails
- # to sign session tokens. IMPORTANT: This is a site secret. It
- # should be at least 50 characters.
- secret_token: ~
-
- # blob_signing_key is a string of alphanumeric characters used to
- # generate permission signatures for Keep locators. It must be
- # identical to the permission key given to Keep. IMPORTANT: This is
- # a site secret. It should be at least 50 characters.
- #
- # Modifying blob_signing_key will invalidate all existing
- # signatures, which can cause programs to fail (e.g., arv-put,
- # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
- # no such processes are running.
- blob_signing_key: ~
-
- # These settings are provided by your OAuth2 provider (e.g.,
- # sso-provider).
- sso_app_secret: ~
- sso_app_id: ~
- sso_provider_url: ~
-
- # If this is not false, HTML requests at the API server's root URL
- # are redirected to this location, and it is provided in the text of
- # user activation notification email messages to remind them where
- # to log in.
- workbench_address: false
-
- # Client-facing URI for websocket service. Nginx should be
- # configured to proxy this URI to arvados-ws; see
- # http://doc.arvados.org/install/install-ws.html
- #
- # If websocket_address is false (which is the default), no websocket
- # server will be advertised to clients. This configuration is not
- # supported.
- #
- # Example:
- #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
- websocket_address: false
-
- # Maximum number of websocket connections allowed
- websocket_max_connections: 500
-
- # Maximum number of events a single connection can be backlogged
- websocket_max_notify_backlog: 1000
-
- # Maximum number of subscriptions a single websocket connection can have
- # active.
- websocket_max_filters: 10
-
- # Git repositories must be readable by api server, or you won't be
- # able to submit crunch jobs. To pass the test suites, put a clone
- # of the arvados tree in {git_repositories_dir}/arvados.git or
- # {git_repositories_dir}/arvados/.git
- git_repositories_dir: /var/lib/arvados/git/repositories
-
- # This is a (bare) repository that stores commits used in jobs. When a job
- # runs, the source commits are first fetched into this repository, then this
- # repository is used to deploy to compute nodes. This should NOT be a
- # subdirectory of {git_repositiories_dir}.
- git_internal_dir: /var/lib/arvados/internal.git
-
- # Default replication level for collections. This is used when a
- # collection's replication_desired attribute is nil.
- default_collection_replication: 2
-
-
- ###
- ### Overriding default advertised hostnames/URLs
- ###
-
- # If not false, this is the hostname, port, and protocol that will be used
- # for root_url and advertised in the discovery document. By default, use
- # the default Rails logic for deciding on a hostname.
- host: false
- port: false
- protocol: false
-
- # Base part of SSH git clone url given with repository resources. If
- # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
- # used. If false, SSH clone URLs are not advertised. Include a
- # trailing ":" or "/" if needed: it will not be added automatically.
- git_repo_ssh_base: true
-
- # Base part of HTTPS git clone urls given with repository
- # resources. This is expected to be an arv-git-httpd service which
- # accepts API tokens as HTTP-auth passwords. If true, the default
- # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
- # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
- # if needed: it will not be added automatically.
- git_repo_https_base: true
-
-
- ###
- ### New user and & email settings
- ###
-
- # Config parameters to automatically setup new users. If enabled,
- # this users will be able to self-activate. Enable this if you want
- # to run an open instance where anyone can create an account and use
- # the system without requiring manual approval.
- #
- # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
- # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
- auto_setup_new_users: false
- auto_setup_new_users_with_vm_uuid: false
- auto_setup_new_users_with_repository: false
- auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
-
- # When new_users_are_active is set to true, new users will be active
- # immediately. This skips the "self-activate" step which enforces
- # user agreements. Should only be enabled for development.
- new_users_are_active: false
-
- # The e-mail address of the user you would like to become marked as an admin
- # user on their first login.
- # In the default configuration, authentication happens through the Arvados SSO
- # server, which uses OAuth2 against Google's servers, so in that case this
- # should be an address associated with a Google account.
- auto_admin_user: false
-
- # If auto_admin_first_user is set to true, the first user to log in when no
- # other admin users exist will automatically become an admin user.
- auto_admin_first_user: false
-
- # Email address to notify whenever a user creates a profile for the
- # first time
- user_profile_notification_address: false
-
- admin_notifier_email_from: arvados@example.com
- email_subject_prefix: "[ARVADOS] "
- user_notifier_email_from: arvados@example.com
- new_user_notification_recipients: [ ]
- new_inactive_user_notification_recipients: [ ]
-
-
- ###
- ### Limits, timeouts and durations
- ###
-
- # Lifetime (in seconds) of blob permission signatures generated by
- # the API server. This determines how long a client can take (after
- # retrieving a collection record) to retrieve the collection data
- # from Keep. If the client needs more time than that (assuming the
- # collection still has the same content and the relevant user/token
- # still has permission) the client can retrieve the collection again
- # to get fresh signatures.
- #
- # This must be exactly equal to the -blob-signature-ttl flag used by
- # keepstore servers. Otherwise, reading data blocks and saving
- # collections will fail with HTTP 403 permission errors.
- #
- # Modifying blob_signature_ttl invalidates existing signatures; see
- # blob_signing_key note above.
- #
- # The default is 2 weeks.
- blob_signature_ttl: 1209600
-
- # Default lifetime for ephemeral collections: 2 weeks. This must not
- # be less than blob_signature_ttl.
- default_trash_lifetime: 1209600
-
- # Interval (seconds) between trash sweeps. During a trash sweep,
- # collections are marked as trash if their trash_at time has
- # arrived, and deleted if their delete_at time has arrived.
- trash_sweep_interval: 60
-
- # Interval (seconds) between asynchronous permission view updates. Any
- # permission-updating API called with the 'async' parameter schedules a an
- # update on the permission view in the future, if not already scheduled.
- async_permissions_update_interval: 20
-
- # Maximum characters of (JSON-encoded) query parameters to include
- # in each request log entry. When params exceed this size, they will
- # be JSON-encoded, truncated to this size, and logged as
- # params_truncated.
- max_request_log_params_size: 2000
-
- # Maximum size (in bytes) allowed for a single API request. This
- # limit is published in the discovery document for use by clients.
- # Note: You must separately configure the upstream web server or
- # proxy to actually enforce the desired maximum request size on the
- # server side.
- max_request_size: 134217728
-
- # Limit the number of bytes read from the database during an index
- # request (by retrieving and returning fewer rows than would
- # normally be returned in a single response).
- # Note 1: This setting never reduces the number of returned rows to
- # zero, no matter how big the first data row is.
- # Note 2: Currently, this is only checked against a specific set of
- # columns that tend to get large (collections.manifest_text,
- # containers.mounts, workflows.definition). Other fields (e.g.,
- # "properties" hashes) are not counted against this limit.
- max_index_database_read: 134217728
-
- # Maximum number of items to return when responding to a APIs that
- # can return partial result sets using limit and offset parameters
- # (e.g., *.index, groups.contents). If a request specifies a "limit"
- # parameter higher than this value, this value is used instead.
- max_items_per_response: 1000
-
- # When you run the db:delete_old_job_logs task, it will find jobs that
- # have been finished for at least this many seconds, and delete their
- # stderr logs from the logs table.
- clean_job_log_rows_after: <%= 30.days %>
-
- # When you run the db:delete_old_container_logs task, it will find
- # containers that have been finished for at least this many seconds,
- # and delete their stdout, stderr, arv-mount, crunch-run, and
- # crunchstat logs from the logs table.
- clean_container_log_rows_after: <%= 30.days %>
-
- # Time to keep audit logs, in seconds. (An audit log is a row added
- # to the "logs" table in the PostgreSQL database each time an
- # Arvados object is created, modified, or deleted.)
- #
- # Currently, websocket event notifications rely on audit logs, so
- # this should not be set lower than 600 (5 minutes).
- max_audit_log_age: 1209600
-
- # Maximum number of log rows to delete in a single SQL transaction.
- #
- # If max_audit_log_delete_batch is 0, log entries will never be
- # deleted by Arvados. Cleanup can be done by an external process
- # without affecting any Arvados system processes, as long as very
- # recent (<5 minutes old) logs are not deleted.
- #
- # 100000 is a reasonable batch size for most sites.
- max_audit_log_delete_batch: 0
-
- # The maximum number of compute nodes that can be in use simultaneously
- # If this limit is reduced, any existing nodes with slot number >= new limit
- # will not be counted against the new limit. In other words, the new limit
- # won't be strictly enforced until those nodes with higher slot numbers
- # go down.
- max_compute_nodes: 64
-
- # These two settings control how frequently log events are flushed to the
- # database. Log lines are buffered until either crunch_log_bytes_per_event
- # has been reached or crunch_log_seconds_between_events has elapsed since
- # the last flush.
- crunch_log_bytes_per_event: 4096
- crunch_log_seconds_between_events: 1
-
- # The sample period for throttling logs, in seconds.
- crunch_log_throttle_period: 60
-
- # Maximum number of bytes that job can log over crunch_log_throttle_period
- # before being silenced until the end of the period.
- crunch_log_throttle_bytes: 65536
-
- # Maximum number of lines that job can log over crunch_log_throttle_period
- # before being silenced until the end of the period.
- crunch_log_throttle_lines: 1024
-
- # Maximum bytes that may be logged by a single job. Log bytes that are
- # silenced by throttling are not counted against this total.
- crunch_limit_log_bytes_per_job: 67108864
-
- crunch_log_partial_line_throttle_period: 5
-
- # Container logs are written to Keep and saved in a collection,
- # which is updated periodically while the container runs. This
- # value sets the interval (given in seconds) between collection
- # updates.
- crunch_log_update_period: 1800
-
- # The log collection is also updated when the specified amount of
- # log data (given in bytes) is produced in less than one update
- # period.
- crunch_log_update_size: 33554432
-
- # Attributes to suppress in events and audit logs. Notably,
- # specifying ["manifest_text"] here typically makes the database
- # smaller and faster.
- #
- # Warning: Using any non-empty value here can have undesirable side
- # effects for any client or component that relies on event logs.
- # Use at your own risk.
- unlogged_attributes: []
-
- # API methods to disable. Disabled methods are not listed in the
- # discovery document, and respond 404 to all requests.
- # Example: ["jobs.create", "pipeline_instances.create"]
- disable_api_methods: []
-
- # Enable the legacy Jobs API.
- # auto -- (default) enable the Jobs API only if it has been used before
- # (i.e., there are job records in the database)
- # true -- enable the Jobs API despite lack of existing records.
- # false -- disable the Jobs API despite presence of existing records.
- enable_legacy_jobs_api: auto
-
- ###
- ### Crunch, DNS & compute node management
- ###
-
- # Preemptible instance support (e.g. AWS Spot Instances)
- # When true, child containers will get created with the preemptible
- # scheduling parameter parameter set.
- preemptible_instances: false
-
- # Docker image to be used when none found in runtime_constraints of a job
- default_docker_image_for_jobs: false
-
- # List of supported Docker Registry image formats that compute nodes
- # are able to use. `arv keep docker` will error out if a user tries
- # to store an image with an unsupported format. Use an empty array
- # to skip the compatibility check (and display a warning message to
- # that effect).
- #
- # Example for sites running docker < 1.10: ["v1"]
- # Example for sites running docker >= 1.10: ["v2"]
- # Example for disabling check: []
- docker_image_formats: ["v2"]
-
- # :none or :slurm_immediate
- crunch_job_wrapper: :none
-
- # username, or false = do not set uid when running jobs.
- crunch_job_user: crunch
-
- # The web service must be able to create/write this file, and
- # crunch-job must be able to stat() it.
- crunch_refresh_trigger: /tmp/crunch_refresh_trigger
-
- # Path to dns server configuration directory
- # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
- # files or touch restart.txt (see below).
- dns_server_conf_dir: false
-
- # Template file for the dns server host snippets. See
- # unbound.template in this directory for an example. If false, do
- # not write any config files.
- dns_server_conf_template: false
-
- # String to write to {dns_server_conf_dir}/restart.txt (with a
- # trailing newline) after updating local data. If false, do not
- # open or write the restart.txt file.
- dns_server_reload_command: false
-
- # Command to run after each DNS update. Template variables will be
- # substituted; see the "unbound" example below. If false, do not run
- # a command.
- dns_server_update_command: false
-
- ## Example for unbound:
- #dns_server_conf_dir: /etc/unbound/conf.d
- #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
- ## ...plus one of the following two methods of reloading:
- #dns_server_reload_command: unbound-control reload
- #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
-
- compute_node_domain: false
- compute_node_nameservers:
- - 192.168.1.1
-
- # Hostname to assign to a compute node when it sends a "ping" and the
- # hostname in its Node record is nil.
- # During bootstrapping, the "ping" script is expected to notice the
- # hostname given in the ping response, and update its unix hostname
- # accordingly.
- # If false, leave the hostname alone (this is appropriate if your compute
- # nodes' hostnames are already assigned by some other mechanism).
- #
- # One way or another, the hostnames of your node records should agree
- # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
- #
- # Example for compute0000, compute0001, ....:
- # assign_node_hostname: compute%<slot_number>04d
- # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
- assign_node_hostname: compute%<slot_number>d
-
-
- ###
- ### Job and container reuse logic.
- ###
-
- # Include details about job reuse decisions in the server log. This
- # causes additional database queries to run, so it should not be
- # enabled unless you expect to examine the resulting logs for
- # troubleshooting purposes.
- log_reuse_decisions: false
-
- # Control job reuse behavior when two completed jobs match the
- # search criteria and have different outputs.
- #
- # If true, in case of a conflict, reuse the earliest job (this is
- # similar to container reuse behavior).
- #
- # If false, in case of a conflict, do not reuse any completed job,
- # but do reuse an already-running job if available (this is the
- # original job reuse behavior, and is still the default).
- reuse_job_if_outputs_differ: false
-
- ###
- ### Federation support.
- ###
-
- # You can enable use of this cluster by users who are authenticated
- # by a remote Arvados site. Control which remote hosts are trusted
- # to authenticate which user IDs by configuring remote_hosts,
- # remote_hosts_via_dns, or both. The default configuration disables
- # remote authentication.
-
- # Map known prefixes to hosts. For example, if user IDs beginning
- # with "zzzzz-" should be authenticated by the Arvados server at
- # "zzzzz.example.com", use:
- #
- # remote_hosts:
- # zzzzz: zzzzz.example.com
- remote_hosts: {}
-
- # Use {prefix}.arvadosapi.com for any prefix not given in
- # remote_hosts above.
- remote_hosts_via_dns: false
-
- # List of cluster prefixes. These are "trusted" clusters, users
- # from the clusters listed here will be automatically setup and
- # activated. This is separate from the settings
- # auto_setup_new_users and new_users_are_active.
- auto_activate_users_from: []
-
- ###
- ### Remaining assorted configuration options.
- ###
-
- arvados_theme: default
-
- # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the
- # Single Sign On (sso) server and remote Arvados sites. Should only
- # be enabled during development when the SSO server is using a
- # self-signed cert.
- sso_insecure: false
## Set Time.zone default to the specified zone and make Active
## Record auto-convert to this zone. Run "rake -D time" for a list
# Version of your assets, change this if you want to expire all your assets
assets.version: "1.0"
- # Allow clients to create collections by providing a manifest with
- # unsigned data blob locators. IMPORTANT: This effectively disables
- # access controls for data stored in Keep: a client who knows a hash
- # can write a manifest that references the hash, pass it to
- # collections.create (which will create a permission link), use
- # collections.get to obtain a signature for that data locator, and
- # use that signed locator to retrieve the data from Keep. Therefore,
- # do not turn this on if your users expect to keep data private from
- # one another!
- permit_create_collection_with_unsigned_manifest: false
-
default_openid_prefix: https://www.google.com/accounts/o8/id
# Override the automatic version string. With the default value of
# (included in vendor packages).
package_version: false
- # Default value for container_count_max for container requests. This is the
- # number of times Arvados will create a new container to satisfy a container
- # request. If a container is cancelled it will retry a new container if
- # container_count < container_count_max on any container requests associated
- # with the cancelled container.
- container_count_max: 3
-
- # Default value for keep_cache_ram of a container's runtime_constraints.
- container_default_keep_cache_ram: 268435456
-
- # Token to be included in all healthcheck requests. Disabled by default.
- # Server expects request header of the format "Authorization: Bearer xxx"
- ManagementToken: false
-
- # URL of keep-web service. Provides read/write access to collections via
- # HTTP and WebDAV protocols.
- #
- # Example:
- # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/
- keep_web_service_url: false
-
- # If true, enable collection versioning.
- # When a collection's preserve_version field is true or the current version
- # is older than the amount of seconds defined on preserve_version_if_idle,
- # a snapshot of the collection's previous state is created and linked to
- # the current collection.
- collection_versioning: false
- # 0 = auto-create a new version on every update.
- # -1 = never auto-create new versions.
- # > 0 = auto-create a new version when older than the specified number of seconds.
- preserve_version_if_idle: -1
-
- # Number of times a container can be unlocked before being
- # automatically cancelled.
- max_container_dispatch_attempts: 5
-
development:
force_ssl: false
cache_classes: false
test:
force_ssl: false
cache_classes: true
- public_file_server:
- enabled: true
- headers:
- 'Cache-Control': public, max-age=3600
whiny_nils: true
consider_all_requests_local: true
action_controller.perform_caching: false
# The following is to avoid SafeYAML's warning message
SafeYAML::OPTIONS[:default_mode] = :safe
+ require_relative "arvados_config.rb"
+
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+#
+# Load Arvados configuration from /etc/arvados/config.yml, using defaults
+# from config.default.yml
+#
+# Existing application.yml is migrated into the new config structure.
+# Keys in the legacy application.yml take precedence.
+#
+# Use "bundle exec config:dump" to get the complete active configuration
+#
+# Use "bundle exec config:migrate" to migrate application.yml and
+# database.yml to config.yml. After adding the output of
+# config:migrate to /etc/arvados/config.yml, you will be able to
+# delete application.yml and database.yml.
+
+require 'config_loader'
+
+begin
+ # If secret_token.rb exists here, we need to load it first.
+ require_relative 'secret_token.rb'
+rescue LoadError
+ # Normally secret_token.rb is missing and the secret token is
+ # configured by application.yml (i.e., here!) instead.
+end
+
+if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and
+ not defined? WARNED_OMNIAUTH_CONFIG)
+ Rails.logger.warn <<-EOS
+DEPRECATED CONFIGURATION:
+ Please move your SSO provider config into config/application.yml
+ and delete config/initializers/omniauth.rb.
+EOS
+ # Real values will be copied from globals by omniauth_init.rb. For
+ # now, assign some strings so the generic *.yml config loader
+ # doesn't overwrite them or complain that they're missing.
+ Rails.configuration.Login["ProviderAppID"] = 'xxx'
+ Rails.configuration.Login["ProviderAppSecret"] = 'xxx'
+ Rails.configuration.Services["SSO"]["ExternalURL"] = '//xxx'
+ WARNED_OMNIAUTH_CONFIG = true
+end
+
+# Load the defaults
+$arvados_config_defaults = ConfigLoader.load "#{::Rails.root.to_s}/config/config.default.yml"
+if $arvados_config_defaults.empty?
+ raise "Missing #{::Rails.root.to_s}/config/config.default.yml"
+end
+
+clusterID, clusterConfig = $arvados_config_defaults["Clusters"].first
+$arvados_config_defaults = clusterConfig
+$arvados_config_defaults["ClusterID"] = clusterID
+
+# Initialize the global config with the defaults
+$arvados_config_global = $arvados_config_defaults.deep_dup
+
+# Load the global config file
+confs = ConfigLoader.load "/etc/arvados/config.yml"
+if !confs.empty?
+ clusterID, clusterConfig = confs["Clusters"].first
+ $arvados_config_global["ClusterID"] = clusterID
+
+ # Copy the cluster config over the defaults
+ $arvados_config_global.deep_merge!(clusterConfig)
+end
+
+# Now make a copy
+$arvados_config = $arvados_config_global.deep_dup
+
+# Declare all our configuration items.
+arvcfg = ConfigLoader.new
+arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix
+arvcfg.declare_config "ManagementToken", String, :ManagementToken
+arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir
+arvcfg.declare_config "API.DisabledAPIs", Array, :disable_api_methods
+arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size
+arvcfg.declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
+arvcfg.declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
+arvcfg.declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval
+arvcfg.declare_config "API.RailsSessionSecretToken", NonemptyString, :secret_token
+arvcfg.declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
+arvcfg.declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
+arvcfg.declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
+arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
+arvcfg.declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
+arvcfg.declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
+arvcfg.declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
+arvcfg.declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address
+arvcfg.declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
+arvcfg.declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
+arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
+arvcfg.declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
+arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
+arvcfg.declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
+arvcfg.declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
+arvcfg.declare_config "TLS.Insecure", Boolean, :sso_insecure
+arvcfg.declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url
+arvcfg.declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
+arvcfg.declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
+arvcfg.declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+arvcfg.declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
+arvcfg.declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
+arvcfg.declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
+arvcfg.declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
+arvcfg.declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
+arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
+arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
+arvcfg.declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
+arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
+arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
+arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
+arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
+arvcfg.declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
+arvcfg.declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances
+arvcfg.declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
+arvcfg.declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
+arvcfg.declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
+arvcfg.declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
+arvcfg.declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
+arvcfg.declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
+arvcfg.declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
+arvcfg.declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
+arvcfg.declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
+arvcfg.declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
+arvcfg.declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfDir", Pathname, :dns_server_conf_dir
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname, :dns_server_conf_template
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+arvcfg.declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
+arvcfg.declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
+arvcfg.declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
+arvcfg.declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
+arvcfg.declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
+arvcfg.declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
+arvcfg.declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
+arvcfg.declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
+arvcfg.declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
+arvcfg.declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
+arvcfg.declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address
+arvcfg.declare_config "Services.Websocket.ExternalURL", URI, :websocket_address
+arvcfg.declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url
+arvcfg.declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base
+arvcfg.declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Services.GitSSH.ExternalURL", "ssh://#{v}" }
+arvcfg.declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
+ h = {}
+ v.each do |clusterid, host|
+ h[clusterid] = {
+ "Host" => host,
+ "Proxy" => true,
+ "Scheme" => "https",
+ "Insecure" => false,
+ "ActivateUsers" => false
+ }
+ end
+ ConfigLoader.set_cfg cfg, "RemoteClusters", h
+}
+arvcfg.declare_config "RemoteClusters.*.Proxy", Boolean, :remote_hosts_via_dns
+
+dbcfg = ConfigLoader.new
+
+dbcfg.declare_config "PostgreSQL.ConnectionPool", Integer, :pool
+dbcfg.declare_config "PostgreSQL.Connection.Host", String, :host
+dbcfg.declare_config "PostgreSQL.Connection.Port", Integer, :port
+dbcfg.declare_config "PostgreSQL.Connection.User", String, :username
+dbcfg.declare_config "PostgreSQL.Connection.Password", String, :password
+dbcfg.declare_config "PostgreSQL.Connection.DBName", String, :database
+dbcfg.declare_config "PostgreSQL.Connection.Template", String, :template
+dbcfg.declare_config "PostgreSQL.Connection.Encoding", String, :encoding
+
+application_config = {}
+%w(application.default application).each do |cfgfile|
+ path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
+ confs = ConfigLoader.load(path, erb: true)
+ # Ignore empty YAML file:
+ next if confs == false
+ application_config.deep_merge!(confs['common'] || {})
+ application_config.deep_merge!(confs[::Rails.env.to_s] || {})
+end
+
+db_config = {}
+path = "#{::Rails.root.to_s}/config/database.yml"
+if File.exist? path
+ db_config = ConfigLoader.load(path, erb: true)
+end
+
+$remaining_config = arvcfg.migrate_config(application_config, $arvados_config)
+dbcfg.migrate_config(db_config[::Rails.env.to_s] || {}, $arvados_config)
+
+# Migrate the legacy auto_activate_users_from list: mark each listed
+# remote cluster as one whose users are auto-activated here.
+# NOTE(review): application_config keys are plain strings (YAML is
+# loaded with deserialize_symbols: false and the remainder keys are
+# never symbolized), and $arvados_config is a plain Hash, so it must
+# be indexed with ["RemoteClusters"], not method-call syntax.
+if application_config["auto_activate_users_from"]
+  application_config["auto_activate_users_from"].each do |cluster|
+    if $arvados_config["RemoteClusters"][cluster]
+      $arvados_config["RemoteClusters"][cluster]["ActivateUsers"] = true
+    end
+  end
+end
+
+# Checks for wrongly typed configuration items, coerces properties
+# into correct types (such as Duration), and optionally raise error
+# for essential configuration that can't be empty.
+arvcfg.coercion_and_check $arvados_config_defaults, check_nonempty: false
+arvcfg.coercion_and_check $arvados_config_global, check_nonempty: false
+arvcfg.coercion_and_check $arvados_config, check_nonempty: true
+dbcfg.coercion_and_check $arvados_config, check_nonempty: true
+
+# * $arvados_config_defaults is the defaults
+# * $arvados_config_global is $arvados_config_defaults merged with the contents of /etc/arvados/config.yml
+# These are used by the rake config: tasks
+#
+# * $arvados_config is $arvados_config_global merged with the migrated contents of application.yml
+# This is what actually gets copied into the Rails configuration object.
+
+if $arvados_config["Collections"]["DefaultTrashLifetime"] < 86400.seconds then
+ raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.Collections.DefaultTrashLifetime
+end
+
+#
+# Special case for test database where there's no database.yml,
+# because the Arvados config.yml doesn't have a concept of multiple
+# rails environments.
+#
+if ::Rails.env.to_s == "test" && db_config["test"].nil?
+ $arvados_config["PostgreSQL"]["Connection"]["DBName"] = "arvados_test"
+end
+
+if $arvados_config["PostgreSQL"]["Connection"]["Password"].empty?
+ raise "Database password is empty, PostgreSQL section is: #{$arvados_config["PostgreSQL"]}"
+end
+
+dbhost = $arvados_config["PostgreSQL"]["Connection"]["Host"]
+# Append ":port" only when a nonzero port is configured.  The key is
+# "Port" (declared via dbcfg above); it was misspelled "Post", so the
++# port was silently never appended.
+if $arvados_config["PostgreSQL"]["Connection"]["Port"] != 0
+  dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["Port"]}"
+end
+
+#
+# If DATABASE_URL is set, then ActiveRecord won't error out if database.yml doesn't exist.
+#
+# For config migration, we've previously populated the PostgreSQL
+# section of the config from database.yml
+#
+ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["User"]}:"+
+ "#{$arvados_config["PostgreSQL"]["Connection"]["Password"]}@"+
+ "#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["DBName"]}?"+
+ "template=#{$arvados_config["PostgreSQL"]["Connection"]["Template"]}&"+
+ "encoding=#{$arvados_config["PostgreSQL"]["Connection"]["client_encoding"]}&"+
+ "pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"
+
+Server::Application.configure do
+ # Copy into the Rails config object. This also turns Hash into
+ # OrderedOptions so that application code can use
+ # Rails.configuration.API.Blah instead of
+ # Rails.configuration.API["Blah"]
+ ConfigLoader.copy_into_config $arvados_config, config
+ ConfigLoader.copy_into_config $remaining_config, config
+ config.secret_key_base = config.secret_token
+end
--- /dev/null
+../../../lib/config/config.default.yml
\ No newline at end of file
# Config must be done before we files; otherwise they
# won't be able to use Rails.configuration.* to initialize their
# classes.
-require_relative 'load_config.rb'
require 'enable_jobs_api'
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-begin
- # If secret_token.rb exists here, we need to load it first.
- require_relative 'secret_token.rb'
-rescue LoadError
- # Normally secret_token.rb is missing and the secret token is
- # configured by application.yml (i.e., here!) instead.
-end
-
-if (File.exist?(File.expand_path '../omniauth.rb', __FILE__) and
- not defined? WARNED_OMNIAUTH_CONFIG)
- Rails.logger.warn <<-EOS
-DEPRECATED CONFIGURATION:
- Please move your SSO provider config into config/application.yml
- and delete config/initializers/omniauth.rb.
-EOS
- # Real values will be copied from globals by omniauth_init.rb. For
- # now, assign some strings so the generic *.yml config loader
- # doesn't overwrite them or complain that they're missing.
- Rails.configuration.sso_app_id = 'xxx'
- Rails.configuration.sso_app_secret = 'xxx'
- Rails.configuration.sso_provider_url = '//xxx'
- WARNED_OMNIAUTH_CONFIG = true
-end
-
-$application_config = {}
-
-%w(application.default application).each do |cfgfile|
- path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
- if File.exist? path
- yaml = ERB.new(IO.read path).result(binding)
- confs = YAML.load(yaml, deserialize_symbols: true)
- # Ignore empty YAML file:
- next if confs == false
- $application_config.merge!(confs['common'] || {})
- $application_config.merge!(confs[::Rails.env.to_s] || {})
- end
-end
-
-Server::Application.configure do
- nils = []
- $application_config.each do |k, v|
- # "foo.bar: baz" --> { config.foo.bar = baz }
- cfg = config
- ks = k.split '.'
- k = ks.pop
- ks.each do |kk|
- cfg = cfg.send(kk)
- end
- if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
- # Config must have been set already in environments/*.rb.
- #
- # After config files have been migrated, this mechanism should
- # be deprecated, then removed.
- elsif v.nil?
- # Config variables are not allowed to be nil. Make a "naughty"
- # list, and present it below.
- nils << k
- else
- cfg.send "#{k}=", v
- end
- end
- if !nils.empty?
- raise <<EOS
-Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
-
-The following configuration settings must be specified in
-config/application.yml:
-* #{nils.join "\n* "}
-
-EOS
- end
- config.secret_key_base = config.secret_token
-end
end
params_s = SafeJSON.dump(params)
- if params_s.length > Rails.configuration.max_request_log_params_size
- payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
+ if params_s.length > Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]
+ payload[:params_truncated] = params_s[0..Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]] + "[...]"
else
payload[:params] = params
end
if defined? CUSTOM_PROVIDER_URL
Rails.logger.warn "Copying omniauth from globals in legacy config file."
- Rails.configuration.sso_app_id = APP_ID
- Rails.configuration.sso_app_secret = APP_SECRET
- Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL
+ Rails.configuration.Login["ProviderAppID"] = APP_ID
+ Rails.configuration.Login["ProviderAppSecret"] = APP_SECRET
+ Rails.configuration.Services["SSO"]["ExternalURL"] = CUSTOM_PROVIDER_URL
else
Rails.application.config.middleware.use OmniAuth::Builder do
provider(:josh_id,
- Rails.configuration.sso_app_id,
- Rails.configuration.sso_app_secret,
- Rails.configuration.sso_provider_url)
+ Rails.configuration.Login["ProviderAppID"],
+ Rails.configuration.Login["ProviderAppSecret"],
+ Rails.configuration.Services["SSO"]["ExternalURL"])
end
OmniAuth.config.on_failure = StaticController.action(:login_failure)
end
# Config must be done before we load model class files; otherwise they
# won't be able to use Rails.configuration.* to initialize their
# classes.
-require_relative 'load_config.rb'
if Rails.env == 'development'
Dir.foreach("#{Rails.root}/app/models") do |model_file|
('20190214214814'),
('20190322174136');
+
end
def self.tidy_in_background
- max_age = Rails.configuration.max_audit_log_age
- max_batch = Rails.configuration.max_audit_log_delete_batch
+ max_age = Rails.configuration.AuditLogs.MaxAge
+ max_batch = Rails.configuration.AuditLogs.MaxDeleteBatch
return if max_age <= 0 || max_batch <= 0
exp = (max_age/14).seconds
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module Psych
+ module Visitors
+ class YAMLTree < Psych::Visitors::Visitor
+ def visit_ActiveSupport_Duration o
+ seconds = o.to_i
+ outstr = ""
+ if seconds / 3600 > 0
+ outstr += "#{seconds / 3600}h"
+ seconds = seconds % 3600
+ end
+ if seconds / 60 > 0
+ outstr += "#{seconds / 60}m"
+ seconds = seconds % 60
+ end
+ if seconds > 0
+ outstr += "#{seconds}s"
+ end
+ if outstr == ""
+ outstr = "0s"
+ end
+ @emitter.scalar outstr, nil, nil, true, false, Nodes::Scalar::ANY
+ end
+
+ def visit_URI_Generic o
+ @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+ end
+
+ def visit_URI_HTTP o
+ @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+ end
+
+ def visit_Pathname o
+ @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+ end
+ end
+ end
+end
+
+
+module Boolean; end
+class TrueClass; include Boolean; end
+class FalseClass; include Boolean; end
+
+class NonemptyString < String
+end
+
+class ConfigLoader
+ def initialize
+ @config_migrate_map = {}
+ @config_types = {}
+ end
+
+ def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
+ if migrate_from
+ @config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
+ ConfigLoader.set_cfg cfg, assign_to, v
+ }
+ end
+ @config_types[assign_to] = configtype
+ end
+
+
+ def migrate_config from_config, to_config
+ remainders = {}
+ from_config.each do |k, v|
+ if @config_migrate_map[k.to_sym]
+ @config_migrate_map[k.to_sym].call to_config, k, v
+ else
+ remainders[k] = v
+ end
+ end
+ remainders
+ end
+
+ def coercion_and_check check_cfg, check_nonempty: true
+ @config_types.each do |cfgkey, cfgtype|
+ cfg = check_cfg
+ k = cfgkey
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
+ end
+
+ if cfg.nil?
+ raise "missing #{cfgkey}"
+ end
+
+ if cfgtype == String and !cfg[k]
+ cfg[k] = ""
+ end
+
+ if cfgtype == String and cfg[k].is_a? Symbol
+ cfg[k] = cfg[k].to_s
+ end
+
+ if cfgtype == Pathname and cfg[k].is_a? String
+
+ if cfg[k] == ""
+ cfg[k] = Pathname.new("")
+ else
+ cfg[k] = Pathname.new(cfg[k])
+ if !cfg[k].exist?
+ raise "#{cfgkey} path #{cfg[k]} does not exist"
+ end
+ end
+ end
+
+ if cfgtype == NonemptyString
+ if (!cfg[k] || cfg[k] == "") && check_nonempty
+ raise "#{cfgkey} cannot be empty"
+ end
+ if cfg[k].is_a? String
+ next
+ end
+ end
+
+ if cfgtype == ActiveSupport::Duration
+ if cfg[k].is_a? Integer
+ cfg[k] = cfg[k].seconds
+ elsif cfg[k].is_a? String
+ cfg[k] = ConfigLoader.parse_duration cfg[k]
+ end
+ end
+
+ if cfgtype == URI
+ cfg[k] = URI(cfg[k])
+ end
+
+ if !cfg[k].is_a? cfgtype
+ raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
+ end
+ end
+ end
+
+ def self.set_cfg cfg, k, v
+ # "foo.bar = baz" --> { cfg["foo"]["bar"] = baz }
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
+ end
+ if !cfg.nil?
+ cfg[k] = v
+ end
+ end
+
+  # Parse a duration string such as "1h30m5s" (suffixes s, m, h; each
+  # number may be fractional) into an ActiveSupport::Duration of
+  # seconds.  Raises if any part of the string does not match.
+  def self.parse_duration durstr
+    duration_re = /(\d+(\.\d+)?)(s|m|h)/
+    dursec = 0
+    while durstr != ""
+      mt = duration_re.match durstr
+      if !mt
+        # 'cfgkey' and 'cfg' are not in scope in this class method; the
+        # previous message raised NameError instead of the intended error.
+        raise "'#{durstr}' is not a valid duration, accepted suffixes are s, m, h"
+      end
+      multiplier = {s: 1, m: 60, h: 3600}
+      dursec += (Float(mt[1]) * multiplier[mt[3].to_sym])
+      durstr = durstr[mt[0].length..-1]
+    end
+    return dursec.seconds
+  end
+
+ def self.copy_into_config src, dst
+ src.each do |k, v|
+ dst.send "#{k}=", self.to_OrderedOptions(v)
+ end
+ end
+
+ def self.to_OrderedOptions confs
+ if confs.is_a? Hash
+ opts = ActiveSupport::OrderedOptions.new
+ confs.each do |k,v|
+ opts[k] = self.to_OrderedOptions(v)
+ end
+ opts
+ elsif confs.is_a? Array
+ confs.map { |v| self.to_OrderedOptions v }
+ else
+ confs
+ end
+ end
+
+ def self.load path, erb: false
+ if File.exist? path
+ yaml = IO.read path
+ if erb
+ yaml = ERB.new(yaml).result(binding)
+ end
+ YAML.load(yaml, deserialize_symbols: false)
+ else
+ {}
+ end
+ end
+
+end
@cgroup_root = ENV['CRUNCH_CGROUP_ROOT']
@srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT']
- @arvados_internal = Rails.configuration.git_internal_dir
+ @arvados_internal = Rails.configuration.Containers.JobsAPI.GitInternalDir
if not File.exist? @arvados_internal
$stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
raise "No internal git repository available" unless ($? == 0)
end
- @repo_root = Rails.configuration.git_repositories_dir
+ @repo_root = Rails.configuration.Git.Repositories
@arvados_repo_path = Repository.where(name: "arvados").first.server_path
@authorizations = {}
@did_recently = {}
end
def update_node_status
- return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
+ return unless Rails.configuration.Containers.JobsAPI.CrunchJobWrapper.to_s.match(/^slurm/)
slurm_status.each_pair do |hostname, slurmdata|
next if @node_state[hostname] == slurmdata
begin
next if @running[job.uuid]
cmd_args = nil
- case Server::Application.config.crunch_job_wrapper
- when :none
+ case Rails.configuration.Containers.JobsAPI.CrunchJobWrapper
+ when "none"
if @running.size > 0
# Don't run more than one at a time.
return
end
cmd_args = []
- when :slurm_immediate
+ when "slurm_immediate"
nodelist = nodes_available_for_job(job)
if nodelist.nil?
if Time.now < @node_wait_deadline
"--job-name=#{job.uuid}",
"--nodelist=#{nodelist.join(',')}"]
else
- raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
+ raise "Unknown crunch_job_wrapper: #{Rails.configuration.Containers.JobsAPI.CrunchJobWrapper}"
end
cmd_args = sudo_preface + cmd_args
bytes_logged: 0,
events_logged: 0,
log_throttle_is_open: true,
- log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
+ log_throttle_reset_time: Time.now + Rails.configuration.Containers.Logging.LogThrottlePeriod,
log_throttle_bytes_so_far: 0,
log_throttle_lines_so_far: 0,
log_throttle_bytes_skipped: 0,
matches = line.match(/^\S+ \S+ \d+ \d+ stderr (.*)/)
if matches and matches[1] and matches[1].start_with?('[...]') and matches[1].end_with?('[...]')
partial_line = true
- if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.crunch_log_partial_line_throttle_period
+ if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod
running_job[:log_throttle_partial_line_last_at] = Time.now
else
skip_counts = true
end
if (running_job[:bytes_logged] >
- Rails.configuration.crunch_limit_log_bytes_per_job)
- message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
+ Rails.configuration.Containers.Logging.LimitLogBytesPerJob)
+ message = "Exceeded log limit #{Rails.configuration.Containers.Logging.LimitLogBytesPerJob} bytes (LimitLogBytesPerJob). Log will be truncated."
running_job[:log_throttle_reset_time] = Time.now + 100.years
running_job[:log_throttle_is_open] = false
elsif (running_job[:log_throttle_bytes_so_far] >
- Rails.configuration.crunch_log_throttle_bytes)
+ Rails.configuration.Containers.Logging.LogThrottleBytes)
remaining_time = running_job[:log_throttle_reset_time] - Time.now
- message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds."
+ message = "Exceeded rate #{Rails.configuration.Containers.Logging.LogThrottleBytes} bytes per #{Rails.configuration.Containers.Logging.LogThrottlePeriod} seconds (LogThrottleBytes). Logging will be silenced for the next #{remaining_time.round} seconds."
running_job[:log_throttle_is_open] = false
elsif (running_job[:log_throttle_lines_so_far] >
- Rails.configuration.crunch_log_throttle_lines)
+ Rails.configuration.Containers.Logging.LogThrottleLines)
remaining_time = running_job[:log_throttle_reset_time] - Time.now
- message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds."
+ message = "Exceeded rate #{Rails.configuration.Containers.Logging.LogThrottleLines} lines per #{Rails.configuration.Containers.Logging.LogThrottlePeriod} seconds (LogThrottleLines), logging will be silenced for the next #{remaining_time.round} seconds."
running_job[:log_throttle_is_open] = false
elsif partial_line and running_job[:log_throttle_first_partial_line]
running_job[:log_throttle_first_partial_line] = false
- message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.crunch_log_partial_line_throttle_period} seconds."
+ message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod} seconds."
end
end
j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
end
- j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
+ j[:log_throttle_reset_time] = now + Rails.configuration.Containers.Logging.LogThrottlePeriod
j[:log_throttle_bytes_so_far] = 0
j[:log_throttle_lines_so_far] = 0
j[:log_throttle_bytes_skipped] = 0
bufend = ''
streambuf.each_line do |line|
if not line.end_with? $/
- if line.size > Rails.configuration.crunch_log_throttle_bytes
+ if line.size > Rails.configuration.Containers.Logging.LogThrottleBytes
# Without a limit here, we'll use 2x an arbitrary amount
# of memory, and waste a lot of time copying strings
# around, all without providing any feedback to anyone
# This is how crunch-job child procs know where the "refresh"
# trigger file is
- ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
+ ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger
# If salloc can't allocate resources immediately, make it use our
# temporary failure exit code. This ensures crunch-dispatch won't
end
def sudo_preface
- return [] if not Server::Application.config.crunch_job_user
+ return [] if not Rails.configuration.Containers.JobsAPI.CrunchJobUser
["sudo", "-E", "-u",
- Server::Application.config.crunch_job_user,
+ Rails.configuration.Containers.JobsAPI.CrunchJobUser,
"LD_LIBRARY_PATH=#{ENV['LD_LIBRARY_PATH']}",
"PATH=#{ENV['PATH']}",
"PERLLIB=#{ENV['PERLLIB']}",
# Send out to log event if buffer size exceeds the bytes per event or if
# it has been at least crunch_log_seconds_between_events seconds since
# the last flush.
- if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
- (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
+ if running_job[:stderr_buf_to_flush].size > Rails.configuration.Containers.Logging.LogBytesPerEvent or
+ (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.Containers.Logging.LogSecondsBetweenEvents
begin
log = Log.new(object_uuid: running_job[:job].uuid,
event_type: 'stderr',
# An array of job_uuids in squeue
def squeue_jobs
- if Rails.configuration.crunch_job_wrapper == :slurm_immediate
+ if Rails.configuration.Containers.JobsAPI.CrunchJobWrapper == "slurm_immediate"
p = IO.popen(['squeue', '-a', '-h', '-o', '%j'])
begin
p.readlines.map {|line| line.strip}
end
def system_user_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
User.uuid_prefix,
'000000000000000'].join('-')
end
def system_group_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
Group.uuid_prefix,
'000000000000000'].join('-')
end
def anonymous_group_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
Group.uuid_prefix,
'anonymouspublic'].join('-')
end
def anonymous_user_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
User.uuid_prefix,
'anonymouspublic'].join('-')
end
end
def all_users_group_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
Group.uuid_prefix,
'fffffffffffffff'].join('-')
end
"job_tasks.show"]
def check_enable_legacy_jobs_api
- if Rails.configuration.enable_legacy_jobs_api == false ||
- (Rails.configuration.enable_legacy_jobs_api == "auto" &&
+ if Rails.configuration.Containers.JobsAPI.Enable == "false" ||
+ (Rails.configuration.Containers.JobsAPI.Enable == "auto" &&
Job.count == 0)
- Rails.configuration.disable_api_methods += Disable_jobs_api_method_list
+ Rails.configuration.API.DisabledAPIs += Disable_jobs_api_method_list
end
end
Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
end
def generate_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
self.uuid_prefix,
rand(2**256).to_s(36)[-15..-1]].
join '-'
options.client_options[:site] = options[:custom_provider_url]
options.client_options[:authorize_url] = "#{options[:custom_provider_url]}/auth/josh_id/authorize"
options.client_options[:access_token_url] = "#{options[:custom_provider_url]}/auth/josh_id/access_token"
- if Rails.configuration.sso_insecure
+ if Rails.configuration.TLS.Insecure
options.client_options[:ssl] = {verify_mode: OpenSSL::SSL::VERIFY_NONE}
end
::OAuth2::Client.new(options.client_id, options.client_secret, deep_symbolize(options.client_options))
raise ArgumentError.new("Invalid value for limit parameter")
end
@limit = [params[:limit].to_i,
- Rails.configuration.max_items_per_response].min
+ Rails.configuration.API.MaxItemsPerResponse].min
else
@limit = DEFAULT_LIMIT
end
# doing expensive things like database queries, and we want to skip
# those when logging is disabled.
def log_reuse_info(candidates=nil)
- if Rails.configuration.log_reuse_decisions
+ if Rails.configuration.Containers.LogReuseDecisions
msg = yield
if !candidates.nil?
msg = "have #{candidates.count} candidates " + msg
end
def refresh_permission_view(async=false)
- if async and Rails.configuration.async_permissions_update_interval > 0
- exp = Rails.configuration.async_permissions_update_interval.seconds
+ if async and Rails.configuration.API.AsyncPermissionsUpdateInterval > 0
+ exp = Rails.configuration.API.AsyncPermissionsUpdateInterval.seconds
need = false
Rails.cache.fetch('AsyncRefreshPermissionView', expires_in: exp) do
need = true
end
def self.sweep_if_stale
- return if Rails.configuration.trash_sweep_interval <= 0
- exp = Rails.configuration.trash_sweep_interval.seconds
+ return if Rails.configuration.Collections.TrashSweepInterval <= 0
+ exp = Rails.configuration.Collections.TrashSweepInterval.seconds
need = false
Rails.cache.fetch('SweepTrashedObjects', expires_in: exp) do
need = true
--- /dev/null
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+def diff_hash base, final
+  # Return the nested subset of +final+ whose values differ from the
+  # corresponding entries in +base+.  Only keys present in +base+ are
+  # considered.  Leaf comparison is by YAML serialization, so values
+  # that differ only in type (e.g. Symbol vs String) count as changed.
+  diffed = {}
+  base.each do |k, v|
+    bk = base[k]
+    fk = final[k]
+    if bk.is_a?(Hash) && fk.is_a?(Hash)
+      # Both sides are hashes: recurse, keeping the sub-diff only if
+      # something inside actually changed.
+      d = diff_hash bk, fk
+      if d.length > 0
+        diffed[k] = d
+      end
+    elsif bk.to_yaml != fk.to_yaml
+      # Scalars/arrays, or mismatched shapes (one side a Hash, the
+      # other not — including nil).  The original recursed whenever bk
+      # was a Hash, so a key missing from final (fk == nil) crashed
+      # with NoMethodError on nil inside the recursive call.
+      diffed[k] = fk
+    end
+  end
+  diffed
+end
+
+# Rake tasks supporting migration from the legacy per-component
+# application.yml configuration to the unified /etc/arvados/config.yml.
+# The $arvados_config* and $remaining_config globals are populated at
+# boot by the application's config loader (defined elsewhere — not
+# visible in this file).
+namespace :config do
+ desc 'Print items that differ between legacy application.yml and system config.yml'
+ task diff: :environment do
+ # Diff of (legacy-merged config) vs (config.yml alone): anything left
+ # here still needs to be migrated into /etc/arvados/config.yml.
+ diffed = diff_hash $arvados_config_global, $arvados_config
+ cfg = { "Clusters" => {}}
+ # ClusterID becomes the top-level key, so exclude it from the body.
+ cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
+ if cfg["Clusters"][$arvados_config["ClusterID"]].empty?
+ puts "No migrations required for /etc/arvados/config.yml"
+ else
+ puts cfg.to_yaml
+ end
+ end
+
+ desc 'Print config.yml after merging with legacy application.yml'
+ task migrate: :environment do
+ # Diff against the built-in defaults: the output is a complete,
+ # ready-to-install config.yml containing only non-default settings.
+ diffed = diff_hash $arvados_config_defaults, $arvados_config
+ cfg = { "Clusters" => {}}
+ cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
+ puts cfg.to_yaml
+ end
+
+ desc 'Print configuration as accessed through Rails.configuration'
+ task dump: :environment do
+ # Merge the structured config with any leftover legacy keys so the
+ # dump reflects everything Rails.configuration actually exposes.
+ combined = $arvados_config.deep_dup
+ combined.update $remaining_config
+ puts combined.to_yaml
+ end
+end
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-namespace :config do
- desc 'Ensure site configuration has all required settings'
- task check: :environment do
- $stderr.puts "%-32s %s" % ["AppVersion (discovered)", AppVersion.hash]
- $application_config.sort.each do |k, v|
- if ENV.has_key?('QUIET') then
- # Make sure we still check for the variable to exist
- eval("Rails.configuration.#{k}")
- else
- if /(password|secret|signing_key)/.match(k) then
- # Make sure we still check for the variable to exist, but don't print the value
- eval("Rails.configuration.#{k}")
- $stderr.puts "%-32s %s" % [k, '*********']
- else
- $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")]
- end
- end
- end
- # default_trash_lifetime cannot be less than 24 hours
- if Rails.configuration.default_trash_lifetime < 86400 then
- raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime
- end
- end
-end
+++ /dev/null
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-namespace :config do
- desc 'Show site configuration'
- task dump: :environment do
- puts $application_config.to_yaml
- end
-end
desc "Remove old container log entries from the logs table"
task delete_old_container_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.clean_container_log_rows_after} seconds')"
+ delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
ActiveRecord::Base.connection.execute(delete_sql)
end
namespace :db do
desc "Remove old job stderr entries from the logs table"
task delete_old_job_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.clean_job_log_rows_after} seconds')"
+ delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers.Logging.MaxAge} seconds')"
ActiveRecord::Base.connection.execute(delete_sql)
end
if trash_at.nil?
self.delete_at = nil
else
- self.delete_at = trash_at + Rails.configuration.default_trash_lifetime.seconds
+ self.delete_at = trash_at + Rails.configuration.Collections.DefaultTrashLifetime.seconds
end
elsif !trash_at || !delete_at || trash_at > delete_at
# Not trash, or bogus arguments? Just validate in
earliest_delete = [
@validation_timestamp,
trash_at_was,
- ].compact.min + Rails.configuration.blob_signature_ttl.seconds
+ ].compact.min + Rails.configuration.Collections.BlobSigningTTL.seconds
# The previous value of delete_at is also an upper bound on the
# longest-lived permission token. For example, if TTL=14,
@object.update_attributes!(trash_at: db_current_time)
end
earliest_delete = (@object.trash_at +
- Rails.configuration.blob_signature_ttl.seconds)
+ Rails.configuration.Collections.BlobSigningTTL.seconds)
if @object.delete_at > earliest_delete
@object.update_attributes!(delete_at: earliest_delete)
end
def permit_unsigned_manifests isok=true
# Set security model for the life of a test.
- Rails.configuration.permit_create_collection_with_unsigned_manifest = isok
+ Rails.configuration.Collections.BlobSigning = !isok
end
def assert_signed_manifest manifest_text, label='', token: false
exp = tok[/\+A[[:xdigit:]]+@([[:xdigit:]]+)/, 1].to_i(16)
sig = Blob.sign_locator(
bare,
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
expire: exp,
api_token: token)[/\+A[^\+]*/, 0]
assert_includes tok, sig
token = api_client_authorizations(:active).send(token_method)
signed = Blob.sign_locator(
'acbd18db4cc2f85cedef654fccc4a4d8+3',
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: token)
authorize_with_token token
put :update, params: {
def request_capped_index(params={})
authorize_with :user1_with_load
coll1 = collections(:collection_1_of_201)
- Rails.configuration.max_index_database_read =
+ Rails.configuration.API.MaxIndexDatabaseRead =
yield(coll1.manifest_text.size)
get :index, params: {
select: %w(uuid manifest_text),
# Build a manifest with both signed and unsigned locators.
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
}
signed_locators = locators.collect do |x|
# TODO(twp): in phase 4, all locators will need to be signed, so
# this test should break and will need to be rewritten. Issue #2755.
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
ttl: 3600 # 1 hour
}
test "create fails with invalid signature" do
authorize_with :active
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
}
test "create fails with uuid of signed manifest" do
authorize_with :active
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
}
ea10d51bcf88862dbcc36eb292017dfd+45)
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
}
[1, 5, nil].each do |ask|
test "Set replication_desired=#{ask.inspect}" do
- Rails.configuration.default_collection_replication = 2
+ Rails.configuration.Collections.DefaultReplication = 2
authorize_with :active
put :update, params: {
id: collections(:replication_undesired_unconfirmed).uuid,
assert_response 200
c = Collection.find_by_uuid(uuid)
assert_operator c.trash_at, :<, db_current_time
- assert_equal c.delete_at, c.trash_at + Rails.configuration.blob_signature_ttl
+ assert_equal c.delete_at, c.trash_at + Rails.configuration.Collections.BlobSigningTTL
end
test 'delete long-trashed collection immediately using http DELETE verb' do
assert_response 200
c = Collection.find_by_uuid(uuid)
assert_operator c.trash_at, :<, db_current_time
- assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.default_trash_lifetime
+ assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.Collections.DefaultTrashLifetime
end
end
end
test "update collection with versioning enabled" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 1 # 1 second
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 1 # 1 second
col = collections(:collection_owned_by_active)
assert_equal 2, col.version
token = api_client_authorizations(:active).v2token
signed = Blob.sign_locator(
'acbd18db4cc2f85cedef654fccc4a4d8+3',
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: token)
authorize_with_token token
put :update, params: {
format: :json,
count: 'none',
limit: 1000,
- filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+ filters: [['any', '@@', Rails.configuration.ClusterID]],
}
assert_response :success
limit: 1000,
offset: '5',
last_object_class: 'PipelineInstance',
- filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+ filters: [['any', '@@', Rails.configuration.ClusterID]],
}
assert_response :success
end
test 'get contents with jobs and pipeline instances disabled' do
- Rails.configuration.disable_api_methods = ['jobs.index', 'pipeline_instances.index']
+ Rails.configuration.API.DisabledAPIs = ['jobs.index', 'pipeline_instances.index']
authorize_with :active
get :contents, params: {
test 'get contents with low max_index_database_read' do
# Some result will certainly have at least 12 bytes in a
# restricted column
- Rails.configuration.max_index_database_read = 12
+ Rails.configuration.API.MaxIndexDatabaseRead = 12
authorize_with :active
get :contents, params: {
id: groups(:aproject).uuid,
# We need to verify that "cancel" creates a trigger file, so first
# let's make sure there is no stale trigger file.
begin
- File.unlink(Rails.configuration.crunch_refresh_trigger)
+ File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger)
rescue Errno::ENOENT
end
'server should correct bogus cancelled_at ' +
job['cancelled_at'])
assert_equal(true,
- File.exist?(Rails.configuration.crunch_refresh_trigger),
+ File.exist?(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger),
'trigger file should be created when job is cancelled')
end
# We need to verify that "cancel" creates a trigger file, so first
# let's make sure there is no stale trigger file.
begin
- File.unlink(Rails.configuration.crunch_refresh_trigger)
+ File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger)
rescue Errno::ENOENT
end
# We need to verify that "cancel" creates a trigger file, so first
# let's make sure there is no stale trigger file.
begin
- File.unlink(Rails.configuration.crunch_refresh_trigger)
+ File.unlink(Rails.configuration.Containers.JobsAPI.CrunchRefreshTrigger)
rescue Errno::ENOENT
end
end
test 'jobs.create disabled in config' do
- Rails.configuration.disable_api_methods = ["jobs.create",
+ Rails.configuration.API.DisabledAPIs = ["jobs.create",
"pipeline_instances.create"]
authorize_with :active
post :create, params: {
end
test "node should fail ping with invalid hostname config format" do
- Rails.configuration.assign_node_hostname = 'compute%<slot_number>04' # should end with "04d"
+ Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%<slot_number>04' # should end with "04d"
post :ping, params: {
id: nodes(:new_with_no_hostname).uuid,
ping_secret: nodes(:new_with_no_hostname).info['ping_secret'],
end
[
- {cfg: :git_repo_ssh_base, cfgval: "git@example.com:", match: %r"^git@example.com:"},
- {cfg: :git_repo_ssh_base, cfgval: true, match: %r"^git@git.zzzzz.arvadosapi.com:"},
- {cfg: :git_repo_ssh_base, cfgval: false, refute: /^git@/ },
- {cfg: :git_repo_https_base, cfgval: "https://example.com/", match: %r"^https://example.com/"},
- {cfg: :git_repo_https_base, cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"},
- {cfg: :git_repo_https_base, cfgval: false, refute: /^http/ },
+ {cfg: "GitSSH.ExternalURL", cfgval: URI("ssh://git@example.com"), match: %r"^git@example.com:"},
+ {cfg: "GitSSH.ExternalURL", cfgval: URI(""), match: %r"^git@git.zzzzz.arvadosapi.com:"},
+ {cfg: "GitSSH", cfgval: false, refute: /^git@/ },
+ {cfg: "GitHTTP.ExternalURL", cfgval: URI("https://example.com/"), match: %r"^https://example.com/"},
+ {cfg: "GitHTTP.ExternalURL", cfgval: URI(""), match: %r"^https://git.zzzzz.arvadosapi.com/"},
+ {cfg: "GitHTTP", cfgval: false, refute: /^http/ },
].each do |expect|
test "set #{expect[:cfg]} to #{expect[:cfgval]}" do
- Rails.configuration.send expect[:cfg].to_s+"=", expect[:cfgval]
+ ConfigLoader.set_cfg Rails.configuration.Services, expect[:cfg].to_s, expect[:cfgval]
authorize_with :active
get :index
assert_response :success
assert_response :success
discovery_doc = JSON.parse(@response.body)
assert_includes discovery_doc, 'defaultTrashLifetime'
- assert_equal discovery_doc['defaultTrashLifetime'], Rails.application.config.default_trash_lifetime
+ assert_equal discovery_doc['defaultTrashLifetime'], Rails.configuration.Collections.DefaultTrashLifetime
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])
assert_match(/^unknown$/, discovery_doc['packageVersion'])
- assert_equal discovery_doc['websocketUrl'], Rails.application.config.websocket_address
- assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address
+ assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services.Websocket.ExternalURL.to_s
+ assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services.Workbench1.ExternalURL.to_s
assert_equal('zzzzz', discovery_doc['uuidPrefix'])
end
end
test "non-empty disable_api_methods" do
- Rails.configuration.disable_api_methods =
+ Rails.configuration.API.DisabledAPIs =
['jobs.create', 'pipeline_instances.create', 'pipeline_templates.create']
get :index
assert_response :success
setup_email = ActionMailer::Base.deliveries.last
assert_not_nil setup_email, 'Expected email after setup'
- assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0]
+ assert_equal Rails.configuration.Users.UserNotifierEmailFrom, setup_email.from[0]
assert_equal 'foo@example.com', setup_email.to[0]
assert_equal 'Welcome to Arvados - shell account enabled', setup_email.subject
assert (setup_email.body.to_s.include? 'Your Arvados shell account has been set up'),
'Expected Your Arvados shell account has been set up in email body'
- assert (setup_email.body.to_s.include? "#{Rails.configuration.workbench_address}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body'
+ assert (setup_email.body.to_s.include? "#{Rails.configuration.Services.Workbench1.ExternalURL}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body'
end
test "setup inactive user by changing is_active to true" do
def self.included base
base.setup do
# Extract the test repository data into the default test
- # environment's Rails.configuration.git_repositories_dir. (We
+ # environment's Rails.configuration.Git.Repositories. (We
# don't use that config setting here, though: it doesn't seem
# worth the risk of stepping on a real git repo root.)
@tmpdir = Rails.root.join 'tmp', 'git'
FileUtils.mkdir_p @tmpdir
system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar")
- Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
- Rails.configuration.git_internal_dir = "#{@tmpdir}/internal.git"
+ Rails.configuration.Git.Repositories = "#{@tmpdir}/test"
+ Rails.configuration.Containers.JobsAPI.GitInternalDir = "#{@tmpdir}/internal.git"
end
base.teardown do
end
def internal_tag tag
- IO.read "|git --git-dir #{Rails.configuration.git_internal_dir.shellescape} log --format=format:%H -n1 #{tag.shellescape}"
+ IO.read "|git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape} log --format=format:%H -n1 #{tag.shellescape}"
end
# Intercept fetch_remote_repository and fetch from a specified url
test "store collection as json" do
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
}
signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
test "store collection with manifest_text only" do
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
}
signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
test "store collection then update name" do
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections.BlobSigningKey,
api_token: api_token(:active),
}
signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
end
test "create request with async=true defers permissions update" do
- Rails.configuration.async_permissions_update_interval = 1 # second
+ Rails.configuration.API.AsyncPermissionsUpdateInterval = 1 # second
name = "Random group #{rand(1000)}"
assert_equal nil, Group.find_by_name(name)
ready.pop
@remote_server = srv
@remote_host = "127.0.0.1:#{srv.config[:Port]}"
- Rails.configuration.remote_hosts = Rails.configuration.remote_hosts.merge({'zbbbb' => @remote_host,
- 'zbork' => @remote_host})
+ Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({zbbbb: ActiveSupport::InheritableOptions.new({Host: @remote_host}),
+ zbork: ActiveSupport::InheritableOptions.new({Host: @remote_host})})
Arvados::V1::SchemaController.any_instance.stubs(:root_url).returns "https://#{@remote_host}"
@stub_status = 200
@stub_content = {
end
test 'auto-activate user from trusted cluster' do
- Rails.configuration.auto_activate_users_from = ['zbbbb']
+ Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = true
get '/arvados/v1/users/current',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
].each do |testcase|
test "user auto-activate #{testcase.inspect}" do
# Configure auto_setup behavior according to testcase[:cfg]
- Rails.configuration.auto_setup_new_users = testcase[:cfg][:auto]
- Rails.configuration.auto_setup_new_users_with_vm_uuid =
- (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : false)
- Rails.configuration.auto_setup_new_users_with_repository =
+ Rails.configuration.Users.AutoSetupNewUsers = testcase[:cfg][:auto]
+ Rails.configuration.Users.AutoSetupNewUsersWithVmUUID =
+ (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : "")
+ Rails.configuration.Users.AutoSetupNewUsersWithRepository =
testcase[:cfg][:repo]
mock_auth_with(email: testcase[:email])
end
def run_with_expiry(clean_after)
- Rails.configuration.clean_container_log_rows_after = clean_after
+ Rails.configuration.Containers.Logging.MaxAge = clean_after
Rake::Task[TASK_NAME].reenable
Rake.application.invoke_task TASK_NAME
end
end
def run_with_expiry(clean_after)
- Rails.configuration.clean_job_log_rows_after = clean_after
+ Rails.configuration.Containers.Logging.MaxAge = clean_after
Rake::Task[TASK_NAME].reenable
Rake.application.invoke_task TASK_NAME
end
def restore_configuration
# Restore configuration settings changed during tests
- $application_config.each do |k,v|
- if k.match(/^[^.]*$/)
- Rails.configuration.send (k + '='), v
- end
- end
+ ConfigLoader.copy_into_config $arvados_config, Rails.configuration
+ ConfigLoader.copy_into_config $remaining_config, Rails.configuration
end
def set_user_from_auth(auth_name)
expire: 0x7fffffff,
}
- original_ttl = Rails.configuration.blob_signature_ttl
- Rails.configuration.blob_signature_ttl = original_ttl*2
+ original_ttl = Rails.configuration.Collections.BlobSigningTTL
+ Rails.configuration.Collections.BlobSigningTTL = original_ttl*2
signed2 = Blob.sign_locator @@known_locator, {
api_token: @@known_token,
key: @@known_key,
expire: 0x7fffffff,
}
- Rails.configuration.blob_signature_ttl = original_ttl
+ Rails.configuration.Collections.BlobSigningTTL = original_ttl
assert_not_equal signed, signed2
end
end
test "auto-create version after idle setting" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 600 # 10 minutes
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 600 # 10 minutes
act_as_user users(:active) do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
end
test "preserve_version=false assignment is ignored while being true and not producing a new version" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 3600
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 3600
act_as_user users(:active) do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
end
test "uuid updates on current version make older versions update their pointers" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 0
act_as_system_user do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
end
test "older versions' modified_at indicate when they're created" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 0
act_as_user users(:active) do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
end
test "past versions should not be directly updatable" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 0
act_as_system_user do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
assert c_old.invalid?
c_old.reload
# Now disable collection versioning, it should behave the same way
- Rails.configuration.collection_versioning = false
+ Rails.configuration.Collections.CollectionVersioning = false
c_old.name = 'this was foo'
assert c_old.invalid?
end
['is_trashed', true, false],
].each do |attr, first_val, second_val|
test "sync #{attr} with older versions" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 0
act_as_system_user do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
[false, 'replication_desired', 5, false],
].each do |versioning, attr, val, new_version_expected|
test "update #{attr} with versioning #{versioning ? '' : 'not '}enabled should #{new_version_expected ? '' : 'not '}create a new version" do
- Rails.configuration.collection_versioning = versioning
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections.CollectionVersioning = versioning
+ Rails.configuration.Collections.PreserveVersionIfIdle = 0
act_as_user users(:active) do
# Create initial collection
c = create_collection 'foo', Encoding::US_ASCII
end
test 'current_version_uuid is ignored during update' do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 0
act_as_user users(:active) do
# Create 1st collection
col1 = create_collection 'foo', Encoding::US_ASCII
end
test 'with versioning enabled, simultaneous updates increment version correctly' do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections.CollectionVersioning = true
+ Rails.configuration.Collections.PreserveVersionIfIdle = 0
act_as_user users(:active) do
# Create initial collection
col = create_collection 'foo', Encoding::US_ASCII
[0, 2, 4, nil].each do |ask|
test "set replication_desired to #{ask.inspect}" do
- Rails.configuration.default_collection_replication = 2
+ Rails.configuration.Collections.DefaultReplication = 2
act_as_user users(:active) do
c = collections(:replication_undesired_unconfirmed)
c.update_attributes replication_desired: ask
name: 'foo',
trash_at: db_current_time + 1.years)
sig_exp = /\+A[0-9a-f]{40}\@([0-9]+)/.match(c.signed_manifest_text)[1].to_i
- expect_max_sig_exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+ expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL
assert_operator c.trash_at.to_i, :>, expect_max_sig_exp
assert_operator sig_exp.to_i, :<=, expect_max_sig_exp
end
test test_name do
act_as_user users(:active) do
min_exp = (db_current_time +
- Rails.configuration.blob_signature_ttl.seconds)
+ Rails.configuration.Collections.BlobSigningTTL.seconds)
if fixture_name == :expired_collection
# Fixture-finder shorthand doesn't find trashed collections
# because they're not in the default scope.
end
test 'default trash interval > blob signature ttl' do
- Rails.configuration.default_trash_lifetime = 86400 * 21 # 3 weeks
+ Rails.configuration.Collections.DefaultTrashLifetime = 86400 * 21 # 3 weeks
start = db_current_time
act_as_user users(:active) do
c = Collection.create!(manifest_text: '', name: 'foo')
test 'tag_in_internal_repository creates and updates tags in internal.git' do
authorize_with :active
- gitint = "git --git-dir #{Rails.configuration.git_internal_dir}"
+ gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir}"
IO.read("|#{gitint} tag -d testtag 2>/dev/null") # "no such tag", fine
assert_match(/^fatal: /, IO.read("|#{gitint} show testtag 2>&1"))
refute $?.success?
end
def with_foo_repository
- Dir.chdir("#{Rails.configuration.git_repositories_dir}/#{repositories(:foo).uuid}") do
+ Dir.chdir("#{Rails.configuration.Git.Repositories}/#{repositories(:foo).uuid}") do
must_pipe("git checkout master 2>&1")
yield
end
must_pipe("git -c user.email=x@x -c user.name=X commit -m -")
end
Commit.tag_in_internal_repository 'active/foo', sha1, tag
- gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+ gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}"
assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
assert $?.success?
end
must_pipe("git reset --hard HEAD^")
end
Commit.tag_in_internal_repository 'active/foo', sha1, tag
- gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+ gitint = "git --git-dir #{Rails.configuration.Containers.JobsAPI.GitInternalDir.shellescape}"
assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
assert $?.success?
end
test "Container.resolve_container_image(pdh)" do
set_user_from_auth :active
[[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|
- Rails.configuration.docker_image_formats = [ver]
+ Rails.configuration.Containers.SupportedDockerImageFormats = [ver]
pdh = collections(coll).portable_data_hash
resolved = Container.resolve_container_image(pdh)
assert_equal resolved, pdh
test "allow unrecognized container when there are remote_hosts" do
set_user_from_auth :active
- Rails.configuration.remote_hosts = {"foooo" => "bar.com"}
+ Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({foooo: ActiveSupport::InheritableOptions.new({Host: "bar.com"})})
Container.resolve_container_image('acbd18db4cc2f85cedef654fccc4a4d8+3')
end
test "migrated docker image" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
add_docker19_migration_link
# Test that it returns only v2 images even though request is for v1 image.
end
test "use unmigrated docker image" do
- Rails.configuration.docker_image_formats = ['v1']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
add_docker19_migration_link
# Test that it returns only supported v1 images even though there is a
end
test "incompatible docker image v1" do
- Rails.configuration.docker_image_formats = ['v1']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
add_docker19_migration_link
# Don't return unsupported v2 image even if we ask for it directly.
end
test "incompatible docker image v2" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
# No migration link, don't return unsupported v1 image,
set_user_from_auth :active
assert_not_nil(trash)
assert_not_nil(delete)
assert_in_delta(trash, now + 1.second, 10)
- assert_in_delta(delete, now + Rails.configuration.blob_signature_ttl.second, 10)
+ assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL.second, 10)
end
def check_output_ttl_1y(now, trash, delete)
[false, ActiveRecord::RecordInvalid],
[true, nil],
].each do |preemptible_conf, expected|
- test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
+ test "having Rails.configuration.Containers.UsePreemptibleInstances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
sp = {"preemptible" => true}
common_attrs = {cwd: "test",
priority: 1,
output_path: "test",
scheduling_parameters: sp,
mounts: {"test" => {"kind" => "json"}}}
- Rails.configuration.preemptible_instances = preemptible_conf
+ Rails.configuration.Containers.UsePreemptibleInstances = preemptible_conf
set_user_from_auth :active
cr = create_minimal_req!(common_attrs)
scheduling_parameters: {"preemptible" => false},
mounts: {"test" => {"kind" => "json"}}}
- Rails.configuration.preemptible_instances = true
+ Rails.configuration.Containers.UsePreemptibleInstances = true
set_user_from_auth :active
if requesting_c
[false, 'zzzzz-dz642-runningcontainr', nil],
[false, nil, nil],
].each do |preemptible_conf, requesting_c, schedule_preemptible|
- test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
+ test "having Rails.configuration.Containers.UsePreemptibleInstances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
common_attrs = {cwd: "test",
priority: 1,
command: ["echo", "hello"],
output_path: "test",
mounts: {"test" => {"kind" => "json"}}}
- Rails.configuration.preemptible_instances = preemptible_conf
+ Rails.configuration.Containers.UsePreemptibleInstances = preemptible_conf
set_user_from_auth :active
if requesting_c
state: ContainerRequest::Committed,
mounts: {"test" => {"kind" => "json"}}}
set_user_from_auth :active
- Rails.configuration.preemptible_instances = true
+ Rails.configuration.Containers.UsePreemptibleInstances = true
cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do
create_minimal_req!(common_attrs)
end
test "find_reusable method should select higher priority queued container" do
- Rails.configuration.log_reuse_decisions = true
+ Rails.configuration.Containers.LogReuseDecisions = true
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{"var" => "queued"}})
c_low_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:1}))
test "find_reusable with logging enabled" do
set_user_from_auth :active
- Rails.configuration.log_reuse_decisions = true
+ Rails.configuration.Containers.LogReuseDecisions = true
Rails.logger.expects(:info).at_least(3)
Container.find_reusable(REUSABLE_COMMON_ATTRS)
end
end
test "Exceed maximum lock-unlock cycles" do
- Rails.configuration.max_container_dispatch_attempts = 3
+ Rails.configuration.Containers.MaxDispatchAttempts = 3
set_user_from_auth :active
c, cr = minimal_new
test 'override --cgroup-root with CRUNCH_CGROUP_ROOT' do
ENV['CRUNCH_CGROUP_ROOT'] = '/path/to/cgroup'
- Rails.configuration.crunch_job_wrapper = :none
+ Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "none"
act_as_system_user do
j = Job.create(repository: 'active/foo',
script: 'hash',
test 'rate limit of partial line segments' do
act_as_system_user do
- Rails.configuration.crunch_log_partial_line_throttle_period = 1
+ Rails.configuration.Containers.Logging.LogPartialLineThrottlePeriod = 1
job = {}
job[:bytes_logged] = 0
end
test 'scancel orphaned job nodes' do
- Rails.configuration.crunch_job_wrapper = :slurm_immediate
+ Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "slurm_immediate"
act_as_system_user do
dispatch = CrunchDispatch.new
end
test 'cancel slurm jobs' do
- Rails.configuration.crunch_job_wrapper = :slurm_immediate
- Rails.configuration.crunch_job_user = 'foobar'
+ Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = "slurm_immediate"
+ Rails.configuration.Containers.JobsAPI.CrunchJobUser = 'foobar'
fake_squeue = IO.popen("echo #{@job[:before_reboot].uuid}")
fake_scancel = IO.popen("true")
IO.expects(:popen).
end
test 'use reboot time' do
- Rails.configuration.crunch_job_wrapper = nil
+ Rails.configuration.Containers.JobsAPI.CrunchJobWrapper = nil
@dispatch.expects(:open).once.with('/proc/stat').
returns open(Rails.root.join('test/fixtures/files/proc_stat'))
@dispatch.fail_jobs(before: 'reboot')
].each do |use_config|
test "Job with no Docker image uses default docker image when configuration is set #{use_config}" do
default_docker_image = collections(:docker_image)[:portable_data_hash]
- Rails.configuration.default_docker_image_for_jobs = default_docker_image if use_config
+ Rails.configuration.Containers.JobsAPI.DefaultDockerImage = default_docker_image if use_config
job = Job.new job_attrs
assert job.valid?, job.errors.full_messages.to_s
'locator' => BAD_COLLECTION,
}.each_pair do |spec_type, image_spec|
test "Job validation fails with nonexistent Docker image #{spec_type}" do
- Rails.configuration.remote_hosts = {}
+ Rails.configuration.RemoteClusters = {}
job = Job.new job_attrs(runtime_constraints:
{'docker_image' => image_spec})
- assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
+ assert(job.invalid?, "nonexistent Docker image #{spec_type} #{image_spec} was valid")
end
end
end
test "use migrated docker image if requesting old-format image by tag" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
add_docker19_migration_link
job = Job.create!(
job_attrs(
end
test "use migrated docker image if requesting old-format image by pdh" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
add_docker19_migration_link
job = Job.create!(
job_attrs(
[:docker_image_1_12, :docker_image_1_12, :docker_image_1_12],
].each do |existing_image, request_image, expect_image|
test "if a #{existing_image} job exists, #{request_image} yields #{expect_image} after migration" do
- Rails.configuration.docker_image_formats = ['v1']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v1']
if existing_image == :docker_image
oldjob = Job.create!(
end
end
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers.SupportedDockerImageFormats = ['v2']
add_docker19_migration_link
# Check that both v1 and v2 images get resolved to v2.
end
test 'find_reusable with logging' do
- Rails.configuration.log_reuse_decisions = true
+ Rails.configuration.Containers.LogReuseDecisions = true
Rails.logger.expects(:info).at_least(3)
try_find_reusable
end
assert_nil Job.find_reusable(example_attrs, {}, [], [users(:active)])
# ...unless config says to reuse the earlier job in such cases.
- Rails.configuration.reuse_job_if_outputs_differ = true
+ Rails.configuration.Containers.JobsAPI.ReuseJobIfOutputsDiffer = true
j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
assert_equal foobar.uuid, j.uuid
end
end
test 'enable legacy api configuration option = true' do
- Rails.configuration.enable_legacy_jobs_api = true
+ Rails.configuration.Containers.JobsAPI.Enable = "true"
check_enable_legacy_jobs_api
- assert_equal [], Rails.configuration.disable_api_methods
+ assert_equal [], Rails.configuration.API.DisabledAPIs
end
test 'enable legacy api configuration option = false' do
- Rails.configuration.enable_legacy_jobs_api = false
+ Rails.configuration.Containers.JobsAPI.Enable = "false"
check_enable_legacy_jobs_api
- assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+ assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
end
test 'enable legacy api configuration option = auto, has jobs' do
- Rails.configuration.enable_legacy_jobs_api = "auto"
+ Rails.configuration.Containers.JobsAPI.Enable = "auto"
assert Job.count > 0
- assert_equal [], Rails.configuration.disable_api_methods
check_enable_legacy_jobs_api
- assert_equal [], Rails.configuration.disable_api_methods
+ assert_equal [], Rails.configuration.API.DisabledAPIs
end
test 'enable legacy api configuration option = auto, no jobs' do
- Rails.configuration.enable_legacy_jobs_api = "auto"
+ Rails.configuration.Containers.JobsAPI.Enable = "auto"
act_as_system_user do
Job.destroy_all
end
assert_equal 0, Job.count
- assert_equal [], Rails.configuration.disable_api_methods
+ assert_equal [], Rails.configuration.API.DisabledAPIs
check_enable_legacy_jobs_api
- assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+ assert_equal Disable_jobs_api_method_list, Rails.configuration.API.DisabledAPIs
end
end
end
test "non-empty configuration.unlogged_attributes" do
- Rails.configuration.unlogged_attributes = ["manifest_text"]
+ Rails.configuration.AuditLogs.UnloggedAttributes = ["manifest_text"]
txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
act_as_system_user do
end
test "empty configuration.unlogged_attributes" do
- Rails.configuration.unlogged_attributes = []
+ Rails.configuration.AuditLogs.UnloggedAttributes = []
txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
act_as_system_user do
test 'retain old audit logs with default settings' do
assert_no_logs_deleted do
AuditLogs.delete_old(
- max_age: Rails.configuration.max_audit_log_age,
- max_batch: Rails.configuration.max_audit_log_delete_batch)
+ max_age: Rails.configuration.AuditLogs.MaxAge,
+ max_batch: Rails.configuration.AuditLogs.MaxDeleteBatch)
end
end
test 'delete old audit logs in thread' do
begin
- Rails.configuration.max_audit_log_age = 20
- Rails.configuration.max_audit_log_delete_batch = 100000
+ Rails.configuration.AuditLogs.MaxAge = 20
+ Rails.configuration.AuditLogs.MaxDeleteBatch = 100000
Rails.cache.delete 'AuditLogs'
initial_log_count = Log.unscoped.all.count + 1
act_as_system_user do
end
test "dns_server_conf_template" do
- Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
- Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template'
conffile = Rails.root.join 'tmp', 'compute65535.conf'
File.unlink conffile rescue nil
assert Node.dns_server_update 'compute65535', '127.0.0.1'
end
test "dns_server_restart_command" do
- Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
- Rails.configuration.dns_server_reload_command = 'foobar'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar'
restartfile = Rails.root.join 'tmp', 'restart.txt'
File.unlink restartfile rescue nil
assert Node.dns_server_update 'compute65535', '127.0.0.127'
end
test "dns_server_restart_command fail" do
- Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp', 'bogusdir'
- Rails.configuration.dns_server_reload_command = 'foobar'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = Rails.root.join 'tmp', 'bogusdir'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'foobar'
refute Node.dns_server_update 'compute65535', '127.0.0.127'
end
test "dns_server_update_command with valid command" do
testfile = Rails.root.join('tmp', 'node_test_dns_server_update_command.txt')
- Rails.configuration.dns_server_update_command =
+ Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand =
('echo -n "%{hostname} == %{ip_address}" >' +
testfile.to_s.shellescape)
assert Node.dns_server_update 'compute65535', '127.0.0.1'
end
test "dns_server_update_command with failing command" do
- Rails.configuration.dns_server_update_command = 'false %{hostname}'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = 'false %{hostname}'
refute Node.dns_server_update 'compute65535', '127.0.0.1'
end
test "dns update with no commands/dirs configured" do
- Rails.configuration.dns_server_update_command = false
- Rails.configuration.dns_server_conf_dir = false
- Rails.configuration.dns_server_conf_template = 'ignored!'
- Rails.configuration.dns_server_reload_command = 'ignored!'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerUpdateCommand = ""
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = ""
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = 'ignored!'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerReloadCommand = 'ignored!'
assert Node.dns_server_update 'compute65535', '127.0.0.127'
end
test "don't leave temp files behind if there's an error writing them" do
- Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfTemplate = Rails.root.join 'config', 'unbound.template'
Tempfile.any_instance.stubs(:puts).raises(IOError)
Dir.mktmpdir do |tmpdir|
- Rails.configuration.dns_server_conf_dir = tmpdir
+ Rails.configuration.Containers.SLURM.Managed.DNSServerConfDir = tmpdir
refute Node.dns_server_update 'compute65535', '127.0.0.127'
assert_empty Dir.entries(tmpdir).select{|f| File.file? f}
end
end
test "ping new node with no hostname and no config" do
- Rails.configuration.assign_node_hostname = false
+ Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false
node = ping_node(:new_with_no_hostname, {})
refute_nil node.slot_number
assert_nil node.hostname
end
test "ping new node with zero padding config" do
- Rails.configuration.assign_node_hostname = 'compute%<slot_number>04d'
+ Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = 'compute%<slot_number>04d'
node = ping_node(:new_with_no_hostname, {})
slot_number = node.slot_number
refute_nil slot_number
end
test "ping node with hostname and no config and expect hostname unchanged" do
- Rails.configuration.assign_node_hostname = false
+ Rails.configuration.Containers.SLURM.Managed.AssignNodeHostname = false
node = ping_node(:new_with_custom_hostname, {})
assert_equal(23, node.slot_number)
assert_equal("custom1", node.hostname)
end
test 'run out of slots' do
- Rails.configuration.max_compute_nodes = 3
+ Rails.configuration.Containers.MaxComputeVMs = 3
act_as_system_user do
Node.destroy_all
(1..4).each do |i|
n = Node.create!
args = { ip: "10.0.0.#{i}", ping_secret: n.info['ping_secret'] }
- if i <= Rails.configuration.max_compute_nodes
+ if i <= Rails.configuration.Containers.MaxComputeVMs
n.ping(args)
else
assert_raises do
def default_git_url(repo_name, user_name=nil)
if user_name
"git@git.%s.arvadosapi.com:%s/%s.git" %
- [Rails.configuration.uuid_prefix, user_name, repo_name]
+ [Rails.configuration.ClusterID, user_name, repo_name]
else
"git@git.%s.arvadosapi.com:%s.git" %
- [Rails.configuration.uuid_prefix, repo_name]
+ [Rails.configuration.ClusterID, repo_name]
end
end
def assert_server_path(path_tail, repo_sym)
- assert_equal(File.join(Rails.configuration.git_repositories_dir, path_tail),
+ assert_equal(File.join(Rails.configuration.Git.Repositories, path_tail),
repositories(repo_sym).server_path)
end
assert_not_nil email
# Test the body of the sent email contains what we expect it to
- assert_equal Rails.configuration.user_notifier_email_from, email.from.first
+ assert_equal Rails.configuration.Users.UserNotifierEmailFrom, email.from.first
assert_equal user.email, email.to.first
assert_equal 'Welcome to Arvados - shell account enabled', email.subject
assert (email.body.to_s.include? 'Your Arvados shell account has been set up'),
'Expected Your Arvados shell account has been set up in email body'
- assert (email.body.to_s.include? Rails.configuration.workbench_address),
+ assert (email.body.to_s.include? Rails.configuration.Services.Workbench1.ExternalURL.to_s),
'Expected workbench url in email body'
end
end
test "new username set avoiding blacklist" do
- Rails.configuration.auto_setup_name_blacklist = ["root"]
+ Rails.configuration.Users.AutoSetupUsernameBlacklist = ["root"]
check_new_username_setting("root", "root2")
end
[false, 'bar@example.com', nil, true],
[true, 'foo@example.com', true, nil],
[true, 'bar@example.com', true, true],
- [false, false, nil, nil],
- [true, false, true, nil]
+ [false, '', nil, nil],
+ [true, '', true, nil]
].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin|
# In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'.
test "auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}" do
assert_equal 0, @all_users.count, "No admin users should exist (except for the system user)"
end
- Rails.configuration.auto_admin_first_user = auto_admin_first_user_config
- Rails.configuration.auto_admin_user = auto_admin_user_config
+ Rails.configuration.Users.AutoAdminFirstUser = auto_admin_first_user_config
+ Rails.configuration.Users.AutoAdminUserWithEmail = auto_admin_user_config
# See if the foo user has is_admin
foo = User.new
test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
set_user_from_auth :admin
- Rails.configuration.auto_setup_new_users = true
+ Rails.configuration.Users.AutoSetupNewUsers = true
if auto_setup_vm
- Rails.configuration.auto_setup_new_users_with_vm_uuid = virtual_machines(:testvm)['uuid']
+ Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = virtual_machines(:testvm)['uuid']
else
- Rails.configuration.auto_setup_new_users_with_vm_uuid = false
+ Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = ""
end
- Rails.configuration.auto_setup_new_users_with_repository = auto_setup_repo
+ Rails.configuration.Users.AutoSetupNewUsersWithRepository = auto_setup_repo
create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, expect_username
end
end
def create_user_and_verify_setup_and_notifications (active, new_user_recipients, inactive_recipients, email, expect_username)
- Rails.configuration.new_user_notification_recipients = new_user_recipients
- Rails.configuration.new_inactive_user_notification_recipients = inactive_recipients
+ Rails.configuration.Users.NewUserNotificationRecipients = new_user_recipients
+ Rails.configuration.Users.NewInactiveUserNotificationRecipients = inactive_recipients
ActionMailer::Base.deliveries = []
- can_setup = (Rails.configuration.auto_setup_new_users and
+ can_setup = (Rails.configuration.Users.AutoSetupNewUsers and
(not expect_username.nil?))
expect_repo_name = "#{expect_username}/#{expect_username}"
prior_repo = Repository.where(name: expect_repo_name).first
assert_equal(expect_username, user.username)
# check user setup
- verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+ verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active,
groups(:all_users).uuid, user.uuid,
"permission", "can_read")
# Check for OID login link.
- verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+ verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active,
user.uuid, user.email, "permission", "can_login")
# Check for repository.
if named_repo = (prior_repo or
Repository.where(name: expect_repo_name).first)
verify_link_exists((can_setup and prior_repo.nil? and
- Rails.configuration.auto_setup_new_users_with_repository),
+ Rails.configuration.Users.AutoSetupNewUsersWithRepository),
named_repo.uuid, user.uuid, "permission", "can_manage")
end
# Check for VM login.
- if auto_vm_uuid = Rails.configuration.auto_setup_new_users_with_vm_uuid
+ if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != ""
verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
"permission", "can_login", "username", expect_username)
end
new_user_email = nil
new_inactive_user_email = nil
- new_user_email_subject = "#{Rails.configuration.email_subject_prefix}New user created notification"
- if Rails.configuration.auto_setup_new_users
+ new_user_email_subject = "#{Rails.configuration.Users.EmailSubjectPrefix}New user created notification"
+ if Rails.configuration.Users.AutoSetupNewUsers
new_user_email_subject = (expect_username or active) ?
- "#{Rails.configuration.email_subject_prefix}New user created and setup notification" :
- "#{Rails.configuration.email_subject_prefix}New user created, but not setup notification"
+ "#{Rails.configuration.Users.EmailSubjectPrefix}New user created and setup notification" :
+ "#{Rails.configuration.Users.EmailSubjectPrefix}New user created, but not setup notification"
end
ActionMailer::Base.deliveries.each do |d|
if d.subject == new_user_email_subject then
new_user_email = d
- elsif d.subject == "#{Rails.configuration.email_subject_prefix}New inactive user notification" then
+ elsif d.subject == "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification" then
new_inactive_user_email = d
end
end
# if the new user email recipients config parameter is set
if not new_user_recipients.empty? then
assert_not_nil new_user_email, 'Expected new user email after setup'
- assert_equal Rails.configuration.user_notifier_email_from, new_user_email.from[0]
+ assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_user_email.from[0]
assert_equal new_user_recipients, new_user_email.to[0]
assert_equal new_user_email_subject, new_user_email.subject
else
if not active
if not inactive_recipients.empty? then
assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
- assert_equal Rails.configuration.user_notifier_email_from, new_inactive_user_email.from[0]
+ assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_inactive_user_email.from[0]
assert_equal inactive_recipients, new_inactive_user_email.to[0]
- assert_equal "#{Rails.configuration.email_subject_prefix}New inactive user notification", new_inactive_user_email.subject
+ assert_equal "#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification", new_inactive_user_email.subject
else
assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
end