X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/a2d7b1241424ea22e5ee81818d6562f92ee2731d..40dc04d6fa45b6b727e94986491c06132db4a582:/services/api/config/application.default.yml

diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
index a76a567ee7..9fd5368c0a 100644
--- a/services/api/config/application.default.yml
+++ b/services/api/config/application.default.yml
@@ -13,434 +13,6 @@
 # 5. Section in application.default.yml called "common"

 common:
-  ###
-  ### Essential site configuration
-  ###
-
-  # The prefix used for all database identifiers to identify the record as
-  # originating from this site. Must be exactly 5 alphanumeric characters
-  # (lowercase ASCII letters and digits).
-  uuid_prefix: ~
-
-  # secret_token is a string of alphanumeric characters used by Rails
-  # to sign session tokens. IMPORTANT: This is a site secret. It
-  # should be at least 50 characters.
-  secret_token: ~
-
-  # blob_signing_key is a string of alphanumeric characters used to
-  # generate permission signatures for Keep locators. It must be
-  # identical to the permission key given to Keep. IMPORTANT: This is
-  # a site secret. It should be at least 50 characters.
-  #
-  # Modifying blob_signing_key will invalidate all existing
-  # signatures, which can cause programs to fail (e.g., arv-put,
-  # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
-  # no such processes are running.
-  blob_signing_key: ~
-
-  # These settings are provided by your OAuth2 provider (e.g.,
-  # sso-provider).
-  sso_app_secret: ~
-  sso_app_id: ~
-  sso_provider_url: ~
-
-  # If this is not false, HTML requests at the API server's root URL
-  # are redirected to this location, and it is provided in the text of
-  # user activation notification email messages to remind them where
-  # to log in.
-  workbench_address: false
-
-  # Client-facing URI for websocket service. Nginx should be
-  # configured to proxy this URI to arvados-ws; see
-  # http://doc.arvados.org/install/install-ws.html
-  #
-  # If websocket_address is false (which is the default), no websocket
-  # server will be advertised to clients. This configuration is not
-  # supported.
-  #
-  # Example:
-  #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
-  websocket_address: false
-
-  # Maximum number of websocket connections allowed
-  websocket_max_connections: 500
-
-  # Maximum number of events a single connection can be backlogged
-  websocket_max_notify_backlog: 1000
-
-  # Maximum number of subscriptions a single websocket connection can have
-  # active.
-  websocket_max_filters: 10
-
-  # Git repositories must be readable by the API server, or you won't be
-  # able to submit crunch jobs. To pass the test suites, put a clone
-  # of the arvados tree in {git_repositories_dir}/arvados.git or
-  # {git_repositories_dir}/arvados/.git
-  git_repositories_dir: /var/lib/arvados/git/repositories
-
-  # This is a (bare) repository that stores commits used in jobs. When a job
-  # runs, the source commits are first fetched into this repository, then this
-  # repository is used to deploy to compute nodes. This should NOT be a
-  # subdirectory of {git_repositories_dir}.
-  git_internal_dir: /var/lib/arvados/internal.git
-
-  # Default replication level for collections. This is used when a
-  # collection's replication_desired attribute is nil.
-  default_collection_replication: 2
-
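# Illustrative sketch (not part of the file above): a minimal site-specific
# "common" section covering the essential settings removed here. The cluster
# prefix, secrets, and hostnames are placeholders.
#
# common:
#   uuid_prefix: zzzzz
#   secret_token: <50+ random alphanumeric characters>
#   blob_signing_key: <50+ random alphanumeric characters, identical to the key given to keepstore>
#   sso_app_id: arvados-server
#   sso_app_secret: <secret issued by the SSO provider>
#   sso_provider_url: https://auth.zzzzz.example.com
#   workbench_address: https://workbench.zzzzz.example.com
#   websocket_address: wss://ws.zzzzz.example.com/websocket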
-  ###
-  ### Overriding default advertised hostnames/URLs
-  ###
-
-  # If not false, this is the hostname, port, and protocol that will be used
-  # for root_url and advertised in the discovery document. By default, use
-  # the default Rails logic for deciding on a hostname.
-  host: false
-  port: false
-  protocol: false
-
-  # Base part of SSH git clone url given with repository resources. If
-  # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
-  # used. If false, SSH clone URLs are not advertised. Include a
-  # trailing ":" or "/" if needed: it will not be added automatically.
-  git_repo_ssh_base: true
-
-  # Base part of HTTPS git clone urls given with repository
-  # resources. This is expected to be an arv-git-httpd service which
-  # accepts API tokens as HTTP-auth passwords. If true, the default
-  # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
-  # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
-  # if needed: it will not be added automatically.
-  git_repo_https_base: true
-
-
-  ###
-  ### New user and email settings
-  ###
-
-  # Config parameters to automatically set up new users. If enabled,
-  # these users will be able to self-activate. Enable this if you want
-  # to run an open instance where anyone can create an account and use
-  # the system without requiring manual approval.
-  #
-  # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
-  # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
-  auto_setup_new_users: false
-  auto_setup_new_users_with_vm_uuid: false
-  auto_setup_new_users_with_repository: false
-  auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
-
-  # When new_users_are_active is set to true, new users will be active
-  # immediately. This skips the "self-activate" step which enforces
-  # user agreements. Should only be enabled for development.
-  new_users_are_active: false
-
-  # The e-mail address of the user you would like to become marked as an admin
-  # user on their first login.
-  # In the default configuration, authentication happens through the Arvados SSO
-  # server, which uses OAuth2 against Google's servers, so in that case this
-  # should be an address associated with a Google account.
-  auto_admin_user: false
-
-  # If auto_admin_first_user is set to true, the first user to log in when no
-  # other admin users exist will automatically become an admin user.
-  auto_admin_first_user: false
-
-  # Email address to notify whenever a user creates a profile for the
-  # first time
-  user_profile_notification_address: false
-
-  admin_notifier_email_from: arvados@example.com
-  email_subject_prefix: "[ARVADOS] "
-  user_notifier_email_from: arvados@example.com
-  new_user_notification_recipients: [ ]
-  new_inactive_user_notification_recipients: [ ]
-
-
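# Illustrative sketch (not part of the file above): an open, self-service
# instance that sets up each new account automatically. The VM UUID and the
# notification address are placeholders.
#
# auto_setup_new_users: true
# auto_setup_new_users_with_vm_uuid: zzzzz-2x53u-xxxxxxxxxxxxxxx
# auto_setup_new_users_with_repository: true
# new_user_notification_recipients: [admin@example.com]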
-  ###
-  ### Limits, timeouts and durations
-  ###
-
-  # Lifetime (in seconds) of blob permission signatures generated by
-  # the API server. This determines how long a client can take (after
-  # retrieving a collection record) to retrieve the collection data
-  # from Keep. If the client needs more time than that (assuming the
-  # collection still has the same content and the relevant user/token
-  # still has permission) the client can retrieve the collection again
-  # to get fresh signatures.
-  #
-  # This must be exactly equal to the -blob-signature-ttl flag used by
-  # keepstore servers. Otherwise, reading data blocks and saving
-  # collections will fail with HTTP 403 permission errors.
-  #
-  # Modifying blob_signature_ttl invalidates existing signatures; see
-  # blob_signing_key note above.
-  #
-  # The default is 2 weeks.
-  blob_signature_ttl: 1209600
-
-  # Default lifetime for ephemeral collections: 2 weeks. This must not
-  # be less than blob_signature_ttl.
-  default_trash_lifetime: 1209600
-
-  # Interval (seconds) between trash sweeps. During a trash sweep,
-  # collections are marked as trash if their trash_at time has
-  # arrived, and deleted if their delete_at time has arrived.
-  trash_sweep_interval: 60
-
-  # Maximum characters of (JSON-encoded) query parameters to include
-  # in each request log entry. When params exceed this size, they will
-  # be JSON-encoded, truncated to this size, and logged as
-  # params_truncated.
-  max_request_log_params_size: 2000
-
-  # Maximum size (in bytes) allowed for a single API request. This
-  # limit is published in the discovery document for use by clients.
-  # Note: You must separately configure the upstream web server or
-  # proxy to actually enforce the desired maximum request size on the
-  # server side.
-  max_request_size: 134217728
-
-  # Limit the number of bytes read from the database during an index
-  # request (by retrieving and returning fewer rows than would
-  # normally be returned in a single response).
-  # Note 1: This setting never reduces the number of returned rows to
-  # zero, no matter how big the first data row is.
-  # Note 2: Currently, this is only checked against a specific set of
-  # columns that tend to get large (collections.manifest_text,
-  # containers.mounts, workflows.definition). Other fields (e.g.,
-  # "properties" hashes) are not counted against this limit.
-  max_index_database_read: 134217728
-
-  # Maximum number of items to return when responding to APIs that
-  # can return partial result sets using limit and offset parameters
-  # (e.g., *.index, groups.contents). If a request specifies a "limit"
-  # parameter higher than this value, this value is used instead.
-  max_items_per_response: 1000
-
-  # When you run the db:delete_old_job_logs task, it will find jobs that
-  # have been finished for at least this many seconds, and delete their
-  # stderr logs from the logs table.
-  clean_job_log_rows_after: <%= 30.days %>
-
-  # When you run the db:delete_old_container_logs task, it will find
-  # containers that have been finished for at least this many seconds,
-  # and delete their stdout, stderr, arv-mount, crunch-run, and
-  # crunchstat logs from the logs table.
-  clean_container_log_rows_after: <%= 30.days %>
-
-  # Time to keep audit logs, in seconds. (An audit log is a row added
-  # to the "logs" table in the PostgreSQL database each time an
-  # Arvados object is created, modified, or deleted.)
-  #
-  # Currently, websocket event notifications rely on audit logs, so
-  # this should not be set lower than 600 (10 minutes).
-  max_audit_log_age: 1209600
-
-  # Maximum number of log rows to delete in a single SQL transaction.
-  #
-  # If max_audit_log_delete_batch is 0, log entries will never be
-  # deleted by Arvados. Cleanup can be done by an external process
-  # without affecting any Arvados system processes, as long as very
-  # recent (<5 minutes old) logs are not deleted.
-  #
-  # 100000 is a reasonable batch size for most sites.
-  max_audit_log_delete_batch: 0
-
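# Illustrative sketch (not part of the file above): keep two weeks of audit
# logs and let Arvados prune them itself, using the batch size suggested in
# the comment above.
#
# max_audit_log_age: 1209600
# max_audit_log_delete_batch: 100000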
-  # The maximum number of compute nodes that can be in use simultaneously.
-  # If this limit is reduced, any existing nodes with slot number >= new limit
-  # will not be counted against the new limit. In other words, the new limit
-  # won't be strictly enforced until those nodes with higher slot numbers
-  # go down.
-  max_compute_nodes: 64
-
-  # These two settings control how frequently log events are flushed to the
-  # database. Log lines are buffered until either crunch_log_bytes_per_event
-  # has been reached or crunch_log_seconds_between_events has elapsed since
-  # the last flush.
-  crunch_log_bytes_per_event: 4096
-  crunch_log_seconds_between_events: 1
-
-  # The sample period for throttling logs, in seconds.
-  crunch_log_throttle_period: 60
-
-  # Maximum number of bytes that a job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_bytes: 65536
-
-  # Maximum number of lines that a job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_lines: 1024
-
-  # Maximum bytes that may be logged by a single job. Log bytes that are
-  # silenced by throttling are not counted against this total.
-  crunch_limit_log_bytes_per_job: 67108864
-
-  crunch_log_partial_line_throttle_period: 5
-
-  # Attributes to suppress in events and audit logs. Notably,
-  # specifying ["manifest_text"] here typically makes the database
-  # smaller and faster.
-  #
-  # Warning: Using any non-empty value here can have undesirable side
-  # effects for any client or component that relies on event logs.
-  # Use at your own risk.
-  unlogged_attributes: []
-
-  # API methods to disable. Disabled methods are not listed in the
-  # discovery document, and respond 404 to all requests.
-  # Example: ["jobs.create", "pipeline_instances.create"]
-  disable_api_methods: []
-
-  # Enable the legacy Jobs API.
-  # auto -- (default) enable the Jobs API only if it has been used before
-  #         (i.e., there are job records in the database)
-  # true -- enable the Jobs API despite lack of existing records.
-  # false -- disable the Jobs API despite presence of existing records.
-  enable_legacy_jobs_api: auto
-  ###
-  ### Crunch, DNS & compute node management
-  ###
-  # Preemptible instance support (e.g. AWS Spot Instances)
-  # When true, child containers will get created with the preemptible
-  # scheduling parameter set.
-  preemptible_instances: false
-
-  # Docker image to be used when none found in runtime_constraints of a job
-  default_docker_image_for_jobs: false
-
-  # List of supported Docker Registry image formats that compute nodes
-  # are able to use. `arv keep docker` will error out if a user tries
-  # to store an image with an unsupported format. Use an empty array
-  # to skip the compatibility check (and display a warning message to
-  # that effect).
-  #
-  # Example for sites running docker < 1.10: ["v1"]
-  # Example for sites running docker >= 1.10: ["v2"]
-  # Example for disabling check: []
-  docker_image_formats: ["v2"]
-
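# Illustrative sketch (not part of the file above): a cloud site that allows
# preemptible (spot) child containers and supplies a fallback Docker image for
# jobs that do not name one. The image name is a placeholder.
#
# preemptible_instances: true
# default_docker_image_for_jobs: arvados/jobs:latest
# docker_image_formats: ["v2"]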
-  # :none or :slurm_immediate
-  crunch_job_wrapper: :none
-
-  # username, or false = do not set uid when running jobs.
-  crunch_job_user: crunch
-
-  # The web service must be able to create/write this file, and
-  # crunch-job must be able to stat() it.
-  crunch_refresh_trigger: /tmp/crunch_refresh_trigger
-
-  # Path to dns server configuration directory
-  # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
-  # files or touch restart.txt (see below).
-  dns_server_conf_dir: false
-
-  # Template file for the dns server host snippets. See
-  # unbound.template in this directory for an example. If false, do
-  # not write any config files.
-  dns_server_conf_template: false
-
-  # String to write to {dns_server_conf_dir}/restart.txt (with a
-  # trailing newline) after updating local data. If false, do not
-  # open or write the restart.txt file.
-  dns_server_reload_command: false
-
-  # Command to run after each DNS update. Template variables will be
-  # substituted; see the "unbound" example below. If false, do not run
-  # a command.
-  dns_server_update_command: false
-
-  ## Example for unbound:
-  #dns_server_conf_dir: /etc/unbound/conf.d
-  #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
-  ## ...plus one of the following two methods of reloading:
-  #dns_server_reload_command: unbound-control reload
-  #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
-
-  compute_node_domain: false
-  compute_node_nameservers:
-    - 192.168.1.1
-
-  # Hostname to assign to a compute node when it sends a "ping" and the
-  # hostname in its Node record is nil.
-  # During bootstrapping, the "ping" script is expected to notice the
-  # hostname given in the ping response, and update its unix hostname
-  # accordingly.
-  # If false, leave the hostname alone (this is appropriate if your compute
-  # nodes' hostnames are already assigned by some other mechanism).
-  #
-  # One way or another, the hostnames of your node records should agree
-  # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
-  #
-  # Example for compute0000, compute0001, ....:
-  # assign_node_hostname: compute%04d
-  # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
-  assign_node_hostname: compute%d
-
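# Illustrative sketch (not part of the file above): name compute nodes
# compute0000, compute0001, ... under a site domain, using the resolver the
# nodes should be handed. The domain and nameserver address are placeholders;
# the hostname format comes from the example above.
#
# compute_node_domain: zzzzz.example.com
# compute_node_nameservers:
#   - 10.0.0.2
# assign_node_hostname: compute%04d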
-
-  ###
-  ### Job and container reuse logic.
-  ###
-
-  # Include details about job reuse decisions in the server log. This
-  # causes additional database queries to run, so it should not be
-  # enabled unless you expect to examine the resulting logs for
-  # troubleshooting purposes.
-  log_reuse_decisions: false
-
-  # Control job reuse behavior when two completed jobs match the
-  # search criteria and have different outputs.
-  #
-  # If true, in case of a conflict, reuse the earliest job (this is
-  # similar to container reuse behavior).
-  #
-  # If false, in case of a conflict, do not reuse any completed job,
-  # but do reuse an already-running job if available (this is the
-  # original job reuse behavior, and is still the default).
-  reuse_job_if_outputs_differ: false
-
-  ###
-  ### Federation support.
-  ###
-
-  # You can enable use of this cluster by users who are authenticated
-  # by a remote Arvados site. Control which remote hosts are trusted
-  # to authenticate which user IDs by configuring remote_hosts,
-  # remote_hosts_via_dns, or both. The default configuration disables
-  # remote authentication.
-
-  # Map known prefixes to hosts. For example, if user IDs beginning
-  # with "zzzzz-" should be authenticated by the Arvados server at
-  # "zzzzz.example.com", use:
-  #
-  # remote_hosts:
-  #   zzzzz: zzzzz.example.com
-  remote_hosts: {}
-
-  # Use {prefix}.arvadosapi.com for any prefix not given in
-  # remote_hosts above.
-  remote_hosts_via_dns: false
-
-  # List of cluster prefixes. These are "trusted" clusters, users
-  # from the clusters listed here will be automatically set up and
-  # activated. This is separate from the settings
-  # auto_setup_new_users and new_users_are_active.
-  auto_activate_users_from: []
-
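# Illustrative sketch (not part of the file above): trust logins from a remote
# cluster "aaaaa" at an explicit address, resolve any other prefix through
# {prefix}.arvadosapi.com, and auto-activate users arriving from "aaaaa".
# The prefix and hostname are placeholders.
#
# remote_hosts:
#   aaaaa: arvados.aaaaa.example.com
# remote_hosts_via_dns: true
# auto_activate_users_from: [aaaaa]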
-  ###
-  ### Remaining assorted configuration options.
-  ###
-
-  arvados_theme: default
-
-  # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the
-  # Single Sign On (sso) server and remote Arvados sites. Should only
-  # be enabled during development when the SSO server is using a
-  # self-signed cert.
-  sso_insecure: false

   ## Set Time.zone default to the specified zone and make Active
   ## Record auto-convert to this zone. Run "rake -D time" for a list
@@ -456,17 +28,6 @@ common:
   # Version of your assets, change this if you want to expire all your assets
   assets.version: "1.0"

-  # Allow clients to create collections by providing a manifest with
-  # unsigned data blob locators. IMPORTANT: This effectively disables
-  # access controls for data stored in Keep: a client who knows a hash
-  # can write a manifest that references the hash, pass it to
-  # collections.create (which will create a permission link), use
-  # collections.get to obtain a signature for that data locator, and
-  # use that signed locator to retrieve the data from Keep. Therefore,
-  # do not turn this on if your users expect to keep data private from
-  # one another!
-  permit_create_collection_with_unsigned_manifest: false
-
   default_openid_prefix: https://www.google.com/accounts/o8/id

   # Override the automatic version string. With the default value of
@@ -480,33 +41,6 @@ common:
   # (included in vendor packages).
   package_version: false

-  # Enable asynchronous permission graph rebuild. Must run
-  # script/permission-updater.rb as a separate process. When the permission
-  # cache is invalidated, the background process will update the permission
-  # graph cache. This feature is experimental!
-  async_permissions_update: false
-
-  # Default value for container_count_max for container requests. This is the
-  # number of times Arvados will create a new container to satisfy a container
-  # request. If a container is cancelled, Arvados will retry with a new container
-  # if container_count < container_count_max on any container requests associated
-  # with the cancelled container.
-  container_count_max: 3
-
-  # Default value for keep_cache_ram of a container's runtime_constraints.
-  container_default_keep_cache_ram: 268435456
-
-  # Token to be included in all healthcheck requests. Disabled by default.
-  # Server expects request header of the format "Authorization: Bearer xxx"
-  ManagementToken: false
-
-  # URL of keep-web service. Provides read/write access to collections via
-  # HTTP and WebDAV protocols.
-  #
-  # Example:
-  # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/
-  keep_web_service_url: false
-
 development:
   force_ssl: false
   cache_classes: false
@@ -526,7 +60,8 @@ production:
   cache_classes: true
   consider_all_requests_local: false
   action_controller.perform_caching: true
-  serve_static_files: false
+  public_file_server:
+    enabled: false
   assets.compress: true
   assets.compile: false
   assets.digest: true
@@ -534,8 +69,6 @@ test:
   force_ssl: false
   cache_classes: true
-  serve_static_files: true
-  static_cache_control: public, max-age=3600
   whiny_nils: true
   consider_all_requests_local: true
   action_controller.perform_caching: false
@@ -543,16 +76,3 @@ test:
   action_controller.allow_forgery_protection: false
   action_mailer.delivery_method: :test
   active_support.deprecation: :stderr
-  uuid_prefix: zzzzz
-  sso_app_id: arvados-server
-  sso_app_secret: <%= rand(2**512).to_s(36) %>
-  sso_provider_url: http://localhost:3002
-  secret_token: <%= rand(2**512).to_s(36) %>
-  blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
-  user_profile_notification_address: arvados@example.com
-  workbench_address: https://localhost:3001/
-  git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
-  git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
-  websocket_address: "wss://0.0.0.0:<%= ENV['ARVADOS_TEST_WSS_PORT'] %>/websocket"
-  trash_sweep_interval: -1
-  docker_image_formats: ["v1"]
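# Note on the production/test hunks above: Rails 5 deprecated serve_static_files
# and static_cache_control in favor of public_file_server. The commit simply
# drops those lines from the test section; if the old Cache-Control behavior
# were still wanted, a rough equivalent (an assumption, not part of the change)
# might look like:
#
# test:
#   public_file_server:
#     enabled: true
#     headers:
#       Cache-Control: public, max-age=3600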