X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/3f51fa4899fe43c29e1bf49d7911a40eb41a55e8..c790738ae71771a5574b166b3e93a1bca9b89bf6:/services/api/config/application.default.yml

diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
index 006ac55e47..409dea63bb 100644
--- a/services/api/config/application.default.yml
+++ b/services/api/config/application.default.yml
@@ -41,6 +41,9 @@ test:
   active_support.deprecation: :stderr
   active_record.mass_assignment_sanitizer: :strict
   uuid_prefix: zzzzz
+  sso_app_id: arvados-server
+  sso_app_secret: <%= rand(2**512).to_s(36) %>
+  sso_provider_url: http://localhost:3002
   secret_token: <%= rand(2**512).to_s(36) %>
   blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
   user_profile_notification_address: arvados@example.com
@@ -123,21 +126,32 @@ common:
   # silenced by throttling are not counted against this total.
   crunch_limit_log_bytes_per_job: 67108864
 
-  # Path to dns server configuration directory (e.g. /etc/unbound.d/conf.d),
-  # or false = do not update dns server data.
+  # Path to dns server configuration directory
+  # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+  # files or touch restart.txt (see below).
   dns_server_conf_dir: false
 
-  # Template for the dns server host snippets. See unbound.template in this directory for
-  # an example. Set to false to disable.
+  # Template file for the dns server host snippets. See
+  # unbound.template in this directory for an example. If false, do
+  # not write any config files.
   dns_server_conf_template: false
 
-  # Dns server reload command, or false = do not reload dns server after data change
+  # String to write to {dns_server_conf_dir}/restart.txt (with a
+  # trailing newline) after updating local data. If false, do not
+  # open or write the restart.txt file.
   dns_server_reload_command: false
 
-  # Example for unbound
+  # Command to run after each DNS update. Template variables will be
+  # substituted; see the "unbound" example below. If false, do not run
+  # a command.
+  dns_server_update_command: false
+
+  ## Example for unbound:
   #dns_server_conf_dir: /etc/unbound/conf.d
   #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
-  #dns_server_reload_command: /etc/init.d/unbound reload
+  ## ...plus one of the following two methods of reloading:
+  #dns_server_reload_command: unbound-control reload
+  #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
 
   compute_node_domain: false
   compute_node_nameservers:
@@ -220,9 +234,23 @@ common:
   # a site secret. It should be at least 50 characters.
   blob_signing_key: ~
 
-  # Amount of time (in seconds) for which a blob permission signature
-  # remains valid. Default: 2 weeks (1209600 seconds)
-  blob_signing_ttl: 1209600
+  # Lifetime (in seconds) of blob permission signatures generated by
+  # the API server. This determines how long a client can take (after
+  # retrieving a collection record) to retrieve the collection data
+  # from Keep. If the client needs more time than that (assuming the
+  # collection still has the same content and the relevant user/token
+  # still has permission) the client can retrieve the collection again
+  # to get fresh signatures.
+  #
+  # Datamanager considers an unreferenced block older than this to be
+  # eligible for garbage collection. Therefore, it should never be
+  # smaller than the corresponding value used by any local keepstore
+  # service (see keepstore -blob-signature-ttl flag). This rule
+  # prevents datamanager from trying to garbage-collect recently
+  # written blocks while clients are still holding valid signatures.
+  #
+  # The default is 2 weeks.
+  blob_signature_ttl: 1209600
 
   # Allow clients to create collections by providing a manifest with
   # unsigned data blob locators. IMPORTANT: This effectively disables
@@ -266,27 +294,63 @@ common:
   # server is using a self-signed cert.
   sso_insecure: false
 
+  # These settings are provided by your OAuth2 provider (e.g.,
+  # sso-provider).
+  sso_app_id: ~
+  sso_app_secret: ~
+  sso_provider_url: ~
+
   # Default replication level for collections. This is used when a
   # collection's replication_desired attribute is nil.
   default_collection_replication: 2
 
-  # Maximum size (in bytes) allowed for a single API request that will be
-  # published in the discovery document for use by clients.
-  # Note you must separately configure the upstream web server or proxy to
-  # actually enforce the desired maximum request size on the server side.
+  # Maximum size (in bytes) allowed for a single API request. This
+  # limit is published in the discovery document for use by clients.
+  # Note: You must separately configure the upstream web server or
+  # proxy to actually enforce the desired maximum request size on the
+  # server side.
   max_request_size: 134217728
 
-  # Stop collecting records for an index request after we read this much
-  # data (in bytes) from large database columns.
-  # Currently only `GET /collections` respects this parameter, when the
-  # user requests an index that includes manifest_text. Once the API
-  # server collects records with a total manifest_text size at or above
-  # this amount, it returns those results immediately.
-  # Note this is a threshold, not a limit. Record collection stops
-  # *after* reading this much data.
+  # Limit the number of bytes read from the database during an index
+  # request (by retrieving and returning fewer rows than would
+  # normally be returned in a single response).
+  # Note 1: This setting never reduces the number of returned rows to
+  # zero, no matter how big the first data row is.
+  # Note 2: Currently, this only limits the
+  # arvados.v1.collections.list API (GET /arvados/v1/collections), and
+  # only takes the size of manifest_text into account. Other fields
+  # (e.g., "properties" hashes) are not counted against this limit
+  # when returning collections, and the limit is not applied at all
+  # for other data types.
   max_index_database_read: 134217728
 
   # When you run the db:delete_old_job_logs task, it will find jobs that
   # have been finished for at least this many seconds, and delete their
   # stderr logs from the logs table.
   clean_job_log_rows_after: <%= 30.days %>
+
+  # The maximum number of compute nodes that can be in use simultaneously.
+  # If this limit is reduced, any existing nodes with slot number >= new limit
+  # will not be counted against the new limit. In other words, the new limit
+  # won't be strictly enforced until those nodes with higher slot numbers
+  # go down.
+  max_compute_nodes: 64
+
+  # Docker image to be used when none is found in runtime_constraints of a job.
+  default_docker_image_for_jobs: false
+
+  # Hostname to assign to a compute node when it sends a "ping" and the
+  # hostname in its Node record is nil.
+  # During bootstrapping, the "ping" script is expected to notice the
+  # hostname given in the ping response, and update its unix hostname
+  # accordingly.
+  # If false, leave the hostname alone (this is appropriate if your compute
+  # nodes' hostnames are already assigned by some other mechanism).
+  #
+  # One way or another, the hostnames of your node records should agree
+  # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+  #
+  # Example for compute0000, compute0001, ....:
+  #   assign_node_hostname: compute%04d
+  # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+  assign_node_hostname: compute%d
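
The test-section secrets above are generated inline by ERB each time the
config is loaded: rand(2**512).to_s(36) draws a random integer below 2**512
and renders it in base 36. A minimal sketch of what that expression yields
(plain Ruby, runnable as-is):

    # Same expression as in the ERB tags above: a random integer below
    # 2**512, rendered in base 36 ([0-9a-z]).
    secret = rand(2**512).to_s(36)

    # 512 bits / log2(36) is about 99, so the string is typically 99
    # characters long (occasionally 100, or shorter for small draws).
    puts secret.length
    puts secret[0, 16]  # first characters vary on every run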
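
The %{hostname}, %{uuid_prefix}, %{ip_address}, and %{ptr_domain}
placeholders in dns_server_update_command are substituted per node before
the command runs. A minimal sketch of that expansion, assuming it behaves
like Ruby's built-in %{name} string formatting, with made-up node values
for illustration:

    # Hypothetical values for one compute node; not taken from a real cluster.
    node = {
      hostname: "compute0",
      uuid_prefix: "zzzzz",
      ip_address: "10.0.0.5",
    }

    # One clause of the unbound example above, expanded with Ruby's
    # %{name} substitution (String#% with a Hash argument).
    template = "unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address}"
    puts template % node
    # => unbound-control local_data compute0.zzzzz IN A 10.0.0.5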
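
assign_node_hostname is a format string in the Kernel#format style linked
above, where %d receives the node's slot number. A short demonstration of
the zero-padded compute%04d example:

    # Prints compute0000, compute0001, compute0002: slot numbers
    # zero-padded to four digits by the %04d conversion.
    (0..2).each do |slot_number|
      puts format("compute%04d", slot_number)
    end

    # The default, compute%d, yields unpadded names: compute0, compute1, ...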