From 47abf19591c9816f88f83db9fd2cbe93e2262e79 Mon Sep 17 00:00:00 2001
From: Peter Amstutz
Date: Thu, 21 Mar 2019 16:39:32 -0400
Subject: [PATCH] 13996: Migrate majority of defaults to config.defaults.yml

API server knows types of config parameters (needed for type coercion,
also useful for type checking).

Arvados-DCO-1.1-Signed-off-by: Peter Amstutz
---
 lib/config/config.defaults.yml                 |  88 +++-
 .../app/controllers/application_controller.rb  |  15 +-
 .../api/app/controllers/static_controller.rb   |   4 +-
 .../controllers/user_sessions_controller.rb    |   2 +-
 services/api/app/models/node.rb                |   2 +-
 services/api/config/application.default.yml    | 480 ------------------
 .../api/config/initializers/load_config.rb     | 213 +++++---
 services/api/config/initializers/lograge.rb    |   4 +-
 .../api/config/initializers/omniauth_init.rb   |  12 +-
 services/api/lib/tasks/config_check.rake       |   4 +-
 10 files changed, 223 insertions(+), 601 deletions(-)

diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index 53fc5d9cb1..70162ee5ff 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -1,5 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
 #
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
 #
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.defaults.yml
+
 Clusters:
   xxxxx:
     SystemRootToken: ""
@@ -8,6 +18,51 @@ Clusters:
     # Server expects request header of the format "Authorization: Bearer xxx"
     ManagementToken: ""
 
+    Services:
+      RailsAPI:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Controller:
+        InternalURLs: {}
+        ExternalURL: ""
+      Websocket:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepbalance:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      GitSSH:
+        ExternalURL: ""
+      DispatchCloud:
+        InternalURLs: {}
+      SSO:
+        ExternalURL: ""
+      Keepproxy:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAV:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAVDownload:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Composer:
+        ExternalURL: ""
+      WebShell:
+        ExternalURL: ""
+      Workbench1:
+        InternalURLs: {}
+        ExternalURL: ""
+      Workbench2:
+        ExternalURL: ""
     API:
       # Maximum size (in bytes) allowed for a single API request. This
       # limit is published in the discovery document for use by clients.
@@ -38,6 +93,11 @@ Clusters:
       # Example: ["jobs.create", "pipeline_instances.create"]
       DisabledAPIs: []
 
+      # Interval (seconds) between asynchronous permission view updates. Any
+      # permission-updating API called with the 'async' parameter schedules an
+      # update on the permission view in the future, if not already scheduled.
+      AsyncPermissionsUpdateInterval: 20
+
     Users:
       # Config parameters to automatically setup new users. If enabled,
       # this users will be able to self-activate. Enable this if you want
@@ -165,11 +225,6 @@ Clusters:
       # arrived, and deleted if their delete_at time has arrived.
      TrashSweepInterval: 60
 
-      # Interval (seconds) between asynchronous permission view updates. Any
-      # permission-updating API called with the 'async' parameter schedules a an
-      # update on the permission view in the future, if not already scheduled.
-      AsyncPermissionsUpdateInterval: 20
-
       # If true, enable collection versioning.
       # When a collection's preserve_version field is true or the current version
       # is older than the amount of seconds defined on preserve_version_if_idle,
@@ -195,6 +250,9 @@ Clusters:
       # {git_repositories_dir}/arvados/.git
       Repositories: /var/lib/arvados/git/repositories
 
+    TLS:
+      Insecure: false
+
     Containers:
       # List of supported Docker Registry image formats that compute nodes
       # are able to use. `arv keep docker` will error out if a user tries
@@ -327,7 +385,7 @@ Clusters:
       # Example for compute0000, compute0001, ....:
      # assign_node_hostname: compute%04d
       # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
-      AssignNodeHostname: compute%d
+      AssignNodeHostname: "compute%d"
 
      JobsAPI:
        # Enable the legacy Jobs API.
@@ -346,8 +404,8 @@ Clusters:
        # Docker image to be used when none found in runtime_constraints of a job
        DefaultDockerImage: ""
 
-        # :none or :slurm_immediate
-        CrunchJobWrapper: :none
+        # none or slurm_immediate
+        CrunchJobWrapper: none
 
        # username, or false = do not set uid when running jobs.
        CrunchJobUser: crunch
@@ -368,10 +426,10 @@ Clusters:
        ReuseJobIfOutputsDiffer: false
 
    Mail:
-      MailchimpAPIKey:                 # api-server/mailchimp_api_key
-      MailchimpListID:                 # api-server/mailchimp_list_id
-      SendUserSetupNotificationEmail:  # workbench/send_user_setup_notification_email
-      IssueReporterEmailFrom:          # workbench/issue_reporter_email_from
-      IssueReporterEmailTo:            # workbench/issue_reporter_email_to
-      SupportEmailAddress:             # workbench/support_email_address
-      EmailFrom:                       # workbench/email_from
+      MailchimpAPIKey: ""
+      MailchimpListID: ""
+      SendUserSetupNotificationEmail: ""
+      IssueReporterEmailFrom: ""
+      IssueReporterEmailTo: ""
+      SupportEmailAddress: ""
+      EmailFrom: ""

diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index 78fea32b23..1ec921b8ad 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -53,8 +53,6 @@ class ApplicationController < ActionController::Base
   before_action(:render_404_if_no_object,
                 except: [:index, :create] + ERROR_ACTIONS)
 
-  theme Rails.configuration.arvados_theme
-
   attr_writer :resource_attrs
 
   begin
@@ -83,15 +81,10 @@ class ApplicationController < ActionController::Base
 
   def default_url_options
     options = {}
-    if Rails.configuration.host
-      options[:host] = Rails.configuration.host
-    end
-    if Rails.configuration.port
-      options[:port] = Rails.configuration.port
-    end
-    if Rails.configuration.protocol
-      options[:protocol] = Rails.configuration.protocol
-    end
+    exturl = URI.parse(Rails.configuration.Services["Controller"]["ExternalURL"])
+    options[:host] = exturl.host
+    options[:port] = exturl.port
+    options[:protocol] = exturl.scheme
     options
   end
 
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
index b421f54596..53ab6ccb92 100644
--- a/services/api/app/controllers/static_controller.rb
+++ b/services/api/app/controllers/static_controller.rb
@@ -12,8 +12,8 @@ class StaticController < ApplicationController
   def home
     respond_to do |f|
       f.html do
-        if Rails.configuration.workbench_address
-          redirect_to Rails.configuration.workbench_address
+        if Rails.configuration.Services["Workbench1"]["ExternalURL"]
+          redirect_to Rails.configuration.Services["Workbench1"]["ExternalURL"]
         else
           render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
         end
 
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
index c5d3ae74f3..cab5442635 100644
--- a/services/api/app/controllers/user_sessions_controller.rb
+++ b/services/api/app/controllers/user_sessions_controller.rb
@@ -120,7 +120,7 @@ class UserSessionsController < ApplicationController
     flash[:notice] = 'You have logged off'
     return_to = params[:return_to] || root_url
-    redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+    redirect_to "#{Rails.configuration.Services["SSO"]["ExternalURL"]}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
   end
 
   # login - Just bounce to /auth/joshid. The only purpose of this function is

diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index 3c4712fded..9a99f7260b 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -203,7 +203,7 @@ class Node < ArvadosModel
     tmpfile = nil
     begin
       begin
-        template = IO.read(Rails.configuration.dns_server_conf_template)
+        template = IO.read(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"])
       rescue IOError, SystemCallError => e
         logger.error "Reading #{Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]}: #{e.message}"
         raise

diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
index 98443b428f..66e09f6719 100644
--- a/services/api/config/application.default.yml
+++ b/services/api/config/application.default.yml
@@ -13,216 +13,6 @@
 # 5. Section in application.default.yml called "common"
 
 common:
-  ###
-  ### Essential site configuration
-  ###
-
-  # The prefix used for all database identifiers to identify the record as
-  # originating from this site. Must be exactly 5 alphanumeric characters
-  # (lowercase ASCII letters and digits).
-  uuid_prefix: ~
-
-  # secret_token is a string of alphanumeric characters used by Rails
-  # to sign session tokens. IMPORTANT: This is a site secret. It
-  # should be at least 50 characters.
-  secret_token: ~
-
-  # blob_signing_key is a string of alphanumeric characters used to
-  # generate permission signatures for Keep locators. It must be
-  # identical to the permission key given to Keep. IMPORTANT: This is
-  # a site secret. It should be at least 50 characters.
-  #
-  # Modifying blob_signing_key will invalidate all existing
-  # signatures, which can cause programs to fail (e.g., arv-put,
-  # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
-  # no such processes are running.
-  blob_signing_key: ~
-
-  # These settings are provided by your OAuth2 provider (e.g.,
-  # sso-provider).
-  sso_app_secret: ~
-  sso_app_id: ~
-  sso_provider_url: ~
-
-  # If this is not false, HTML requests at the API server's root URL
-  # are redirected to this location, and it is provided in the text of
-  # user activation notification email messages to remind them where
-  # to log in.
-  workbench_address: false
-
-  # Client-facing URI for websocket service. Nginx should be
-  # configured to proxy this URI to arvados-ws; see
-  # http://doc.arvados.org/install/install-ws.html
-  #
-  # If websocket_address is false (which is the default), no websocket
-  # server will be advertised to clients. This configuration is not
-  # supported.
-  #
-  # Example:
-  #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
-  websocket_address: false
-
-  # Maximum number of websocket connections allowed
-  websocket_max_connections: 500
-
-  # Maximum number of events a single connection can be backlogged
-  websocket_max_notify_backlog: 1000
-
-  # Maximum number of subscriptions a single websocket connection can have
-  # active.
-  websocket_max_filters: 10
-
-  # Git repositories must be readable by api server, or you won't be
-  # able to submit crunch jobs. To pass the test suites, put a clone
-  # of the arvados tree in {git_repositories_dir}/arvados.git or
-  # {git_repositories_dir}/arvados/.git
-  git_repositories_dir: /var/lib/arvados/git/repositories
-
-  # This is a (bare) repository that stores commits used in jobs. When a job
-  # runs, the source commits are first fetched into this repository, then this
-  # repository is used to deploy to compute nodes. This should NOT be a
-  # subdirectory of {git_repositiories_dir}.
-  git_internal_dir: /var/lib/arvados/internal.git
-
-  # Default replication level for collections. This is used when a
-  # collection's replication_desired attribute is nil.
-  default_collection_replication: 2
-
-
-  ###
-  ### Overriding default advertised hostnames/URLs
-  ###
-
-  # If not false, this is the hostname, port, and protocol that will be used
-  # for root_url and advertised in the discovery document. By default, use
-  # the default Rails logic for deciding on a hostname.
-  host: false
-  port: false
-  protocol: false
-
-  # Base part of SSH git clone url given with repository resources. If
-  # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
-  # used. If false, SSH clone URLs are not advertised. Include a
-  # trailing ":" or "/" if needed: it will not be added automatically.
-  git_repo_ssh_base: true
-
-  # Base part of HTTPS git clone urls given with repository
-  # resources. This is expected to be an arv-git-httpd service which
-  # accepts API tokens as HTTP-auth passwords. If true, the default
-  # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
-  # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
-  # if needed: it will not be added automatically.
-  git_repo_https_base: true
-
-
-  ###
-  ### New user and & email settings
-  ###
-
-  # Config parameters to automatically setup new users. If enabled,
-  # this users will be able to self-activate. Enable this if you want
-  # to run an open instance where anyone can create an account and use
-  # the system without requiring manual approval.
-  #
-  # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
-  # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
-  auto_setup_new_users: false
-  auto_setup_new_users_with_vm_uuid: false
-  auto_setup_new_users_with_repository: false
-  auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
-
-  # When new_users_are_active is set to true, new users will be active
-  # immediately. This skips the "self-activate" step which enforces
-  # user agreements. Should only be enabled for development.
-  new_users_are_active: false
-
-  # The e-mail address of the user you would like to become marked as an admin
-  # user on their first login.
-  # In the default configuration, authentication happens through the Arvados SSO
-  # server, which uses OAuth2 against Google's servers, so in that case this
-  # should be an address associated with a Google account.
-  auto_admin_user: false
-
-  # If auto_admin_first_user is set to true, the first user to log in when no
-  # other admin users exist will automatically become an admin user.
-  auto_admin_first_user: false
-
-  # Email address to notify whenever a user creates a profile for the
-  # first time
-  user_profile_notification_address: false
-
-  admin_notifier_email_from: arvados@example.com
-  email_subject_prefix: "[ARVADOS] "
-  user_notifier_email_from: arvados@example.com
-  new_user_notification_recipients: [ ]
-  new_inactive_user_notification_recipients: [ ]
-
-
-  ###
-  ### Limits, timeouts and durations
-  ###
-
-  # Lifetime (in seconds) of blob permission signatures generated by
-  # the API server. This determines how long a client can take (after
-  # retrieving a collection record) to retrieve the collection data
-  # from Keep. If the client needs more time than that (assuming the
-  # collection still has the same content and the relevant user/token
-  # still has permission) the client can retrieve the collection again
-  # to get fresh signatures.
-  #
-  # This must be exactly equal to the -blob-signature-ttl flag used by
-  # keepstore servers. Otherwise, reading data blocks and saving
-  # collections will fail with HTTP 403 permission errors.
-  #
-  # Modifying blob_signature_ttl invalidates existing signatures; see
-  # blob_signing_key note above.
-  #
-  # The default is 2 weeks.
-  blob_signature_ttl: 1209600
-
-  # Default lifetime for ephemeral collections: 2 weeks. This must not
-  # be less than blob_signature_ttl.
-  default_trash_lifetime: 1209600
-
-  # Interval (seconds) between trash sweeps. During a trash sweep,
-  # collections are marked as trash if their trash_at time has
-  # arrived, and deleted if their delete_at time has arrived.
-  trash_sweep_interval: 60
-
-  # Interval (seconds) between asynchronous permission view updates. Any
-  # permission-updating API called with the 'async' parameter schedules a an
-  # update on the permission view in the future, if not already scheduled.
-  async_permissions_update_interval: 20
-
-  # Maximum characters of (JSON-encoded) query parameters to include
-  # in each request log entry. When params exceed this size, they will
-  # be JSON-encoded, truncated to this size, and logged as
-  # params_truncated.
-  max_request_log_params_size: 2000
-
-  # Maximum size (in bytes) allowed for a single API request. This
-  # limit is published in the discovery document for use by clients.
-  # Note: You must separately configure the upstream web server or
-  # proxy to actually enforce the desired maximum request size on the
-  # server side.
-  max_request_size: 134217728
-
-  # Limit the number of bytes read from the database during an index
-  # request (by retrieving and returning fewer rows than would
-  # normally be returned in a single response).
-  # Note 1: This setting never reduces the number of returned rows to
-  # zero, no matter how big the first data row is.
-  # Note 2: Currently, this is only checked against a specific set of
-  # columns that tend to get large (collections.manifest_text,
-  # containers.mounts, workflows.definition). Other fields (e.g.,
-  # "properties" hashes) are not counted against this limit.
-  max_index_database_read: 134217728
-
-  # Maximum number of items to return when responding to a APIs that
-  # can return partial result sets using limit and offset parameters
-  # (e.g., *.index, groups.contents). If a request specifies a "limit"
-  # parameter higher than this value, this value is used instead.
-  max_items_per_response: 1000
 
   # When you run the db:delete_old_job_logs task, it will find jobs that
   # have been finished for at least this many seconds, and delete their
@@ -235,229 +25,6 @@ common:
   # crunchstat logs from the logs table.
   clean_container_log_rows_after: <%= 30.days %>
 
-  # Time to keep audit logs, in seconds. (An audit log is a row added
-  # to the "logs" table in the PostgreSQL database each time an
-  # Arvados object is created, modified, or deleted.)
-  #
-  # Currently, websocket event notifications rely on audit logs, so
-  # this should not be set lower than 600 (5 minutes).
-  max_audit_log_age: 1209600
-
-  # Maximum number of log rows to delete in a single SQL transaction.
-  #
-  # If max_audit_log_delete_batch is 0, log entries will never be
-  # deleted by Arvados. Cleanup can be done by an external process
-  # without affecting any Arvados system processes, as long as very
-  # recent (<5 minutes old) logs are not deleted.
-  #
-  # 100000 is a reasonable batch size for most sites.
-  max_audit_log_delete_batch: 0
-
-  # The maximum number of compute nodes that can be in use simultaneously
-  # If this limit is reduced, any existing nodes with slot number >= new limit
-  # will not be counted against the new limit. In other words, the new limit
-  # won't be strictly enforced until those nodes with higher slot numbers
-  # go down.
-  max_compute_nodes: 64
-
-  # These two settings control how frequently log events are flushed to the
-  # database. Log lines are buffered until either crunch_log_bytes_per_event
-  # has been reached or crunch_log_seconds_between_events has elapsed since
-  # the last flush.
-  crunch_log_bytes_per_event: 4096
-  crunch_log_seconds_between_events: 1
-
-  # The sample period for throttling logs, in seconds.
-  crunch_log_throttle_period: 60
-
-  # Maximum number of bytes that job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_bytes: 65536
-
-  # Maximum number of lines that job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_lines: 1024
-
-  # Maximum bytes that may be logged by a single job. Log bytes that are
-  # silenced by throttling are not counted against this total.
-  crunch_limit_log_bytes_per_job: 67108864
-
-  crunch_log_partial_line_throttle_period: 5
-
-  # Container logs are written to Keep and saved in a collection,
-  # which is updated periodically while the container runs. This
-  # value sets the interval (given in seconds) between collection
-  # updates.
-  crunch_log_update_period: 1800
-
-  # The log collection is also updated when the specified amount of
-  # log data (given in bytes) is produced in less than one update
-  # period.
-  crunch_log_update_size: 33554432
-
-  # Attributes to suppress in events and audit logs. Notably,
-  # specifying ["manifest_text"] here typically makes the database
-  # smaller and faster.
-  #
-  # Warning: Using any non-empty value here can have undesirable side
-  # effects for any client or component that relies on event logs.
-  # Use at your own risk.
-  unlogged_attributes: []
-
-  # API methods to disable. Disabled methods are not listed in the
-  # discovery document, and respond 404 to all requests.
-  # Example: ["jobs.create", "pipeline_instances.create"]
-  disable_api_methods: []
-
-  # Enable the legacy Jobs API.
-  # auto -- (default) enable the Jobs API only if it has been used before
-  #         (i.e., there are job records in the database)
-  # true -- enable the Jobs API despite lack of existing records.
-  # false -- disable the Jobs API despite presence of existing records.
-  enable_legacy_jobs_api: auto
-
-  ###
-  ### Crunch, DNS & compute node management
-  ###
-
-  # Preemptible instance support (e.g. AWS Spot Instances)
-  # When true, child containers will get created with the preemptible
-  # scheduling parameter parameter set.
-  preemptible_instances: false
-
-  # Docker image to be used when none found in runtime_constraints of a job
-  default_docker_image_for_jobs: false
-
-  # List of supported Docker Registry image formats that compute nodes
-  # are able to use. `arv keep docker` will error out if a user tries
-  # to store an image with an unsupported format. Use an empty array
-  # to skip the compatibility check (and display a warning message to
-  # that effect).
-  #
-  # Example for sites running docker < 1.10: ["v1"]
-  # Example for sites running docker >= 1.10: ["v2"]
-  # Example for disabling check: []
-  docker_image_formats: ["v2"]
-
-  # :none or :slurm_immediate
-  crunch_job_wrapper: :none
-
-  # username, or false = do not set uid when running jobs.
-  crunch_job_user: crunch
-
-  # The web service must be able to create/write this file, and
-  # crunch-job must be able to stat() it.
-  crunch_refresh_trigger: /tmp/crunch_refresh_trigger
-
-  # Path to dns server configuration directory
-  # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
-  # files or touch restart.txt (see below).
-  dns_server_conf_dir: false
-
-  # Template file for the dns server host snippets. See
-  # unbound.template in this directory for an example. If false, do
-  # not write any config files.
-  dns_server_conf_template: false
-
-  # String to write to {dns_server_conf_dir}/restart.txt (with a
-  # trailing newline) after updating local data. If false, do not
-  # open or write the restart.txt file.
-  dns_server_reload_command: false
-
-  # Command to run after each DNS update. Template variables will be
-  # substituted; see the "unbound" example below. If false, do not run
-  # a command.
-  dns_server_update_command: false
-
-  ## Example for unbound:
-  #dns_server_conf_dir: /etc/unbound/conf.d
-  #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
-  ## ...plus one of the following two methods of reloading:
-  #dns_server_reload_command: unbound-control reload
-  #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
-
-  compute_node_domain: false
-  compute_node_nameservers:
-    - 192.168.1.1
-
-  # Hostname to assign to a compute node when it sends a "ping" and the
-  # hostname in its Node record is nil.
-  # During bootstrapping, the "ping" script is expected to notice the
-  # hostname given in the ping response, and update its unix hostname
-  # accordingly.
-  # If false, leave the hostname alone (this is appropriate if your compute
-  # nodes' hostnames are already assigned by some other mechanism).
-  #
-  # One way or another, the hostnames of your node records should agree
-  # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
-  #
-  # Example for compute0000, compute0001, ....:
-  # assign_node_hostname: compute%04d
-  # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
-  assign_node_hostname: compute%d
-
-
-  ###
-  ### Job and container reuse logic.
-  ###
-
-  # Include details about job reuse decisions in the server log. This
-  # causes additional database queries to run, so it should not be
-  # enabled unless you expect to examine the resulting logs for
-  # troubleshooting purposes.
-  log_reuse_decisions: false
-
-  # Control job reuse behavior when two completed jobs match the
-  # search criteria and have different outputs.
-  #
-  # If true, in case of a conflict, reuse the earliest job (this is
-  # similar to container reuse behavior).
-  #
-  # If false, in case of a conflict, do not reuse any completed job,
-  # but do reuse an already-running job if available (this is the
-  # original job reuse behavior, and is still the default).
-  reuse_job_if_outputs_differ: false
-
-  ###
-  ### Federation support.
-  ###
-
-  # You can enable use of this cluster by users who are authenticated
-  # by a remote Arvados site. Control which remote hosts are trusted
-  # to authenticate which user IDs by configuring remote_hosts,
-  # remote_hosts_via_dns, or both. The default configuration disables
-  # remote authentication.
-
-  # Map known prefixes to hosts. For example, if user IDs beginning
-  # with "zzzzz-" should be authenticated by the Arvados server at
-  # "zzzzz.example.com", use:
-  #
-  # remote_hosts:
-  #   zzzzz: zzzzz.example.com
-  remote_hosts: {}
-
-  # Use {prefix}.arvadosapi.com for any prefix not given in
-  # remote_hosts above.
-  remote_hosts_via_dns: false
-
-  # List of cluster prefixes. These are "trusted" clusters, users
-  # from the clusters listed here will be automatically setup and
-  # activated. This is separate from the settings
-  # auto_setup_new_users and new_users_are_active.
-  auto_activate_users_from: []
-
-  ###
-  ### Remaining assorted configuration options.
-  ###
-
-  arvados_theme: default
-
-  # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the
-  # Single Sign On (sso) server and remote Arvados sites. Should only
-  # be enabled during development when the SSO server is using a
-  # self-signed cert.
-  sso_insecure: false
-
   ## Set Time.zone default to the specified zone and make Active
   ## Record auto-convert to this zone. Run "rake -D time" for a list
   ## of tasks for finding time zone names. Default is UTC.
@@ -472,17 +39,6 @@ common:
   # Version of your assets, change this if you want to expire all your assets
   assets.version: "1.0"
 
-  # Allow clients to create collections by providing a manifest with
-  # unsigned data blob locators. IMPORTANT: This effectively disables
-  # access controls for data stored in Keep: a client who knows a hash
-  # can write a manifest that references the hash, pass it to
-  # collections.create (which will create a permission link), use
-  # collections.get to obtain a signature for that data locator, and
-  # use that signed locator to retrieve the data from Keep. Therefore,
-  # do not turn this on if your users expect to keep data private from
-  # one another!
-  permit_create_collection_with_unsigned_manifest: false
-
   default_openid_prefix: https://www.google.com/accounts/o8/id
 
   # Override the automatic version string. With the default value of
   # false, the version string is read from git-commit.version in
   # (included in vendor packages).
   package_version: false
 
-  # Default value for container_count_max for container requests. This is the
-  # number of times Arvados will create a new container to satisfy a container
-  # request. If a container is cancelled it will retry a new container if
-  # container_count < container_count_max on any container requests associated
-  # with the cancelled container.
-  container_count_max: 3
-
-  # Default value for keep_cache_ram of a container's runtime_constraints.
-  container_default_keep_cache_ram: 268435456
-
-  # Token to be included in all healthcheck requests. Disabled by default.
-  # Server expects request header of the format "Authorization: Bearer xxx"
-  ManagementToken: false
-
-  # URL of keep-web service. Provides read/write access to collections via
-  # HTTP and WebDAV protocols.
-  #
-  # Example:
-  # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/
-  keep_web_service_url: false
-
-  # If true, enable collection versioning.
-  # When a collection's preserve_version field is true or the current version
-  # is older than the amount of seconds defined on preserve_version_if_idle,
-  # a snapshot of the collection's previous state is created and linked to
-  # the current collection.
-  collection_versioning: false
-  # 0 = auto-create a new version on every update.
-  # -1 = never auto-create new versions.
-  # > 0 = auto-create a new version when older than the specified number of seconds.
-  preserve_version_if_idle: -1
-
-  # Number of times a container can be unlocked before being
-  # automatically cancelled.
-  max_container_dispatch_attempts: 5
-
 development:
   force_ssl: false
   cache_classes: false

diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index f52e500890..0a99b1afcd 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -20,9 +20,9 @@ EOS
   # Real values will be copied from globals by omniauth_init.rb. For
   # now, assign some strings so the generic *.yml config loader
   # doesn't overwrite them or complain that they're missing.
-  Rails.configuration.sso_app_id = 'xxx'
-  Rails.configuration.sso_app_secret = 'xxx'
-  Rails.configuration.sso_provider_url = '//xxx'
+  Rails.configuration.Login["ProviderAppID"] = 'xxx'
+  Rails.configuration.Login["ProviderAppSecret"] = 'xxx'
+  Rails.configuration.Services["SSO"]["ExternalURL"] = '//xxx'
 
   WARNED_OMNIAUTH_CONFIG = true
 end
 
@@ -39,75 +39,110 @@ $arvados_config = {}
   end
 end
 
-config_key_map =
-  {
-    "git_repositories_dir": "Git.Repositories",
-    "disable_api_methods": "API.DisabledAPIs",
-    "max_request_size": "API.MaxRequestSize",
-    "max_index_database_read": "API.MaxIndexDatabaseRead",
-    "max_items_per_response": "API.MaxItemsPerResponse",
-    "async_permissions_update_interval": "API.AsyncPermissionsUpdateInterval",
-    "auto_setup_new_users": "Users.AutoSetupNewUsers",
-    "auto_setup_new_users_with_vm_uuid": "Users.AutoSetupNewUsersWithVmUUID",
-    "auto_setup_new_users_with_repository": "Users.AutoSetupNewUsersWithRepository",
-    "auto_setup_name_blacklist": "Users.AutoSetupUsernameBlacklist",
-    "new_users_are_active": "Users.NewUsersAreActive",
-    "auto_admin_user": "Users.AutoAdminUserWithEmail",
-    "auto_admin_first_user": "Users.AutoAdminFirstUser",
-    "user_profile_notification_address": "Users.UserProfileNotificationAddress",
-    "admin_notifier_email_from": "Users.AdminNotifierEmailFrom",
-    "email_subject_prefix": "Users.EmailSubjectPrefix",
-    "user_notifier_email_from": "Users.UserNotifierEmailFrom",
-    "new_user_notification_recipients": "Users.NewUserNotificationRecipients",
-    "new_inactive_user_notification_recipients": "Users.NewInactiveUserNotificationRecipients",
-    "sso_app_secret": "Login.ProviderAppSecret",
-    "sso_app_id": "Login.ProviderAppID",
-    "max_audit_log_age": "AuditLogs.MaxAge",
-    "max_audit_log_delete_batch": "AuditLogs.MaxDeleteBatch",
-    "unlogged_attributes": "AuditLogs.UnloggedAttributes",
-    "max_request_log_params_size": "SystemLogs.MaxRequestLogParamsSize",
-    "default_collection_replication": "Collections.DefaultReplication",
-    "default_trash_lifetime": "Collections.DefaultTrashLifetime",
-    "collection_versioning": "Collections.CollectionVersioning",
-    "preserve_version_if_idle": "Collections.PreserveVersionIfIdle",
-    "trash_sweep_interval": "Collections.TrashSweepInterval",
-    "blob_signing_key": "Collections.BlobSigningKey",
-    "blob_signature_ttl": "Collections.BlobSigningTTL",
-    "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning", # XXX
-    "docker_image_formats": "Containers.SupportedDockerImageFormats",
-    "log_reuse_decisions": "Containers.LogReuseDecisions",
-    "container_default_keep_cache_ram": "Containers.DefaultKeepCacheRAM",
-    "max_container_dispatch_attempts": "Containers.MaxDispatchAttempts",
-    "container_count_max": "Containers.MaxRetryAttempts",
-    "preemptible_instances": "Containers.UsePreemptibleInstances",
-    "max_compute_nodes": "Containers.MaxComputeVMs",
-    "crunch_log_bytes_per_event": "Containers.Logging.LogBytesPerEvent",
-    "crunch_log_seconds_between_events": "Containers.Logging.LogSecondsBetweenEvents",
-    "crunch_log_throttle_period": "Containers.Logging.LogThrottlePeriod",
-    "crunch_log_throttle_bytes": "Containers.Logging.LogThrottleBytes",
-    "crunch_log_throttle_lines": "Containers.Logging.LogThrottleLines",
-    "crunch_limit_log_bytes_per_job": "Containers.Logging.LimitLogBytesPerJob",
-    "crunch_log_partial_line_throttle_period": "Containers.Logging.LogPartialLineThrottlePeriod",
-    "crunch_log_update_period": "Containers.Logging.LogUpdatePeriod",
-    "crunch_log_update_size": "Containers.Logging.LogUpdateSize",
"clean_container_log_rows_after": "Containers.Logging.MaxAge", - "dns_server_conf_dir": "Containers.SLURM.Managed.DNSServerConfDir", - "dns_server_conf_template": "Containers.SLURM.Managed.DNSServerConfTemplate", - "dns_server_reload_command": "Containers.SLURM.Managed.DNSServerReloadCommand", - "dns_server_update_command": "Containers.SLURM.Managed.DNSServerUpdateCommand", - "compute_node_domain": "Containers.SLURM.Managed.ComputeNodeDomain", - "compute_node_nameservers": "Containers.SLURM.Managed.ComputeNodeNameservers", - "assign_node_hostname": "Containers.SLURM.Managed.AssignNodeHostname", - "enable_legacy_jobs_api": "Containers.JobsAPI.Enable", - "crunch_job_wrapper": "Containers.JobsAPI.CrunchJobWrapper", - "crunch_job_user": "Containers.JobsAPI.CrunchJobUser", - "crunch_refresh_trigger": "Containers.JobsAPI.CrunchRefreshTrigger", - "git_internal_dir": "Containers.JobsAPI.GitInternalDir", - "reuse_job_if_outputs_differ": "Containers.JobsAPI.ReuseJobIfOutputsDiffer", - "default_docker_image_for_jobs": "Containers.JobsAPI.DefaultDockerImage", - "mailchimp_api_key": "Mail.MailchimpAPIKey", - "mailchimp_list_id": "Mail.MailchimpListID", -} +def set_cfg cfg, k, v + # "foo.bar: baz" --> { config.foo.bar = baz } + ks = k.split '.' + k = ks.pop + ks.each do |kk| + cfg = cfg[kk] + if cfg.nil? + break + end + end + if !cfg.nil? + cfg[k] = v + end +end + +$config_migrate_map = {} +$config_types = {} +def declare_config(assign_to, configtype, migrate_from=nil) + if migrate_from + $config_migrate_map[migrate_from] = ->(cfg, k, v) { + set_cfg cfg, assign_to, v + } + end + $config_types[assign_to] = configtype +end + +module Boolean; end +class TrueClass; include Boolean; end +class FalseClass; include Boolean; end + +declare_config "ClusterID", String, :uuid_prefix +declare_config "Git.Repositories", String, :git_repositories_dir +declare_config "API.DisabledAPIs", Array, :disable_api_methods +declare_config "API.MaxRequestSize", Integer, :max_request_size +declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read +declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response +declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval +declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users +declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid +declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository +declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist +declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active +declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user +declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user +declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address +declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from +declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix +declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from +declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients +declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients +declare_config "Login.ProviderAppSecret", String, :sso_app_secret +declare_config "Login.ProviderAppID", String, :sso_app_id +declare_config "TLS.Insecure", Boolean, 
+declare_config "Services.SSO.ExternalURL", String, :sso_provider_url
+declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
+declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
+declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
+declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
+declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
+declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
+declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
+declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
+declare_config "Collections.BlobSigningKey", String, :blob_signing_key
+declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
+declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
+declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
+declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
+declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
+declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
+declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances
+declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
+declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
+declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
+declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
+declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
+declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
+declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
+declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
+declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
+declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
+declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
+declare_config "Containers.SLURM.Managed.DNSServerConfDir", String, :dns_server_conf_dir
+declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", String, :dns_server_conf_template
+declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
+declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
+declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
+declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
+declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api
+declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
"Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper +declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user +declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger +declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir +declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ +declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs +declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key +declare_config "Mail.MailchimpListID", String, :mailchimp_list_id +declare_config "Services.Workbench1.ExternalURL", String, :workbench_address +declare_config "Services.Websocket.ExternalURL", String, :websocket_address +declare_config "Services.WebDAV.ExternalURL", String, :keep_web_service_url +declare_config "Services.GitHTTP.ExternalURL", String, :git_repo_https_base +declare_config "Services.GitSSH.ExternalURL", String, :git_repo_ssh_base application_config = {} %w(application.default application).each do |cfgfile| @@ -123,13 +158,16 @@ application_config = {} end application_config.each do |k, v| - cfg = $arvados_config - - if config_key_map[k.to_sym] - k = config_key_map[k.to_sym] + if $config_migrate_map[k.to_sym] + $config_migrate_map[k.to_sym].call $arvados_config, k, v + else + set_cfg $arvados_config, k, v end +end - # "foo.bar: baz" --> { config.foo.bar = baz } +$config_types.each do |cfgkey, cfgtype| + cfg = $arvados_config + k = cfgkey ks = k.split '.' k = ks.pop ks.each do |kk| @@ -138,12 +176,25 @@ application_config.each do |k, v| break end end - if !cfg.nil? - cfg[k] = v + if cfgtype == String and !cfg[k] + cfg[k] = "" + end + if cfgtype == ActiveSupport::Duration + if cfg[k].is_a? Integer + cfg[k] = cfg[k].seconds + elsif cfg[k].is_a? String + # TODO handle suffixes + end end -end -puts $arvados_config.to_yaml + if cfg.nil? + raise "missing #{cfgkey}" + end + + if !cfg[k].is_a? cfgtype + raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}" + end +end Server::Application.configure do nils = [] diff --git a/services/api/config/initializers/lograge.rb b/services/api/config/initializers/lograge.rb index ef4e428bff..07dba3aef4 100644 --- a/services/api/config/initializers/lograge.rb +++ b/services/api/config/initializers/lograge.rb @@ -38,8 +38,8 @@ Server::Application.configure do end params_s = SafeJSON.dump(params) - if params_s.length > Rails.configuration.max_request_log_params_size - payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]" + if params_s.length > Rails.configuration.SystemLogs["MaxRequestLogParamsSize"] + payload[:params_truncated] = params_s[0..Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]] + "[...]" else payload[:params] = params end diff --git a/services/api/config/initializers/omniauth_init.rb b/services/api/config/initializers/omniauth_init.rb index b5e98943df..5610999a94 100644 --- a/services/api/config/initializers/omniauth_init.rb +++ b/services/api/config/initializers/omniauth_init.rb @@ -9,15 +9,15 @@ if defined? CUSTOM_PROVIDER_URL Rails.logger.warn "Copying omniauth from globals in legacy config file." 
 
-  Rails.configuration.sso_app_id = APP_ID
-  Rails.configuration.sso_app_secret = APP_SECRET
-  Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL
+  Rails.configuration.Login["ProviderAppID"] = APP_ID
+  Rails.configuration.Login["ProviderAppSecret"] = APP_SECRET
+  Rails.configuration.Services["SSO"]["ExternalURL"] = CUSTOM_PROVIDER_URL
 else
   Rails.application.config.middleware.use OmniAuth::Builder do
     provider(:josh_id,
-             Rails.configuration.sso_app_id,
-             Rails.configuration.sso_app_secret,
-             Rails.configuration.sso_provider_url)
+             Rails.configuration.Login["ProviderAppID"],
+             Rails.configuration.Login["ProviderAppSecret"],
+             Rails.configuration.Services["SSO"]["ExternalURL"])
   end
   OmniAuth.config.on_failure = StaticController.action(:login_failure)
 end

diff --git a/services/api/lib/tasks/config_check.rake b/services/api/lib/tasks/config_check.rake
index 4f071f11a3..c42c37edbb 100644
--- a/services/api/lib/tasks/config_check.rake
+++ b/services/api/lib/tasks/config_check.rake
@@ -21,8 +21,8 @@ namespace :config do
       end
     end
     # default_trash_lifetime cannot be less than 24 hours
-    if Rails.configuration.default_trash_lifetime < 86400 then
-      raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime
+    if Rails.configuration.Collections["DefaultTrashLifetime"] < 86400 then
+      raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.Collections["DefaultTrashLifetime"]
    end
  end
end
-- 
2.39.5
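
For illustration only (not part of the patch): per the precedence comment added
to config.defaults.yml, site values belong in /etc/arvados/config.yml and
override these defaults. A minimal, hypothetical override might look like the
sketch below; the cluster ID "zzzzz" and the Workbench hostname are
placeholders, not values taken from this change.

    Clusters:
      zzzzz:
        Services:
          Workbench1:
            ExternalURL: "https://workbench.zzzzz.example.com"
        Collections:
          DefaultTrashLifetime: 1209600

The keys follow the Services schema added to config.defaults.yml and the
"Section.Key" paths declared via declare_config in load_config.rb;
DefaultTrashLifetime is an integer number of seconds, which the new loader
coerces to an ActiveSupport::Duration before type-checking it.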