# silenced by throttling are not counted against this total.
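+ # (67108864 bytes = 64 MiB.)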
crunch_limit_log_bytes_per_job: 67108864
- # Path to dns server configuration directory (e.g. /etc/unbound.d/conf.d),
- # or false = do not update dns server data.
+ # Path to dns server configuration directory
+ # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+ # files or touch restart.txt (see below).
dns_server_conf_dir: false
- # Template for the dns server host snippets. See unbound.template in this directory for
- # an example. Set to false to disable.
+ # Template file for the dns server host snippets. See
+ # unbound.template in this directory for an example. If false, do
+ # not write any config files.
dns_server_conf_template: false
- # Dns server reload command, or false = do not reload dns server after data change
+ # String to write to {dns_server_conf_dir}/restart.txt (with a
+ # trailing newline) after updating local data. If false, do not
+ # open or write the restart.txt file.
dns_server_reload_command: false
- # Example for unbound
+ # Command to run after each DNS update. Template variables will be
+ # substituted; see the "unbound" example below. If false, do not run
+ # a command.
+ dns_server_update_command: false
+
+ ## Example for unbound:
#dns_server_conf_dir: /etc/unbound/conf.d
#dns_server_conf_template: /path/to/your/api/server/config/unbound.template
- #dns_server_reload_command: /etc/init.d/unbound reload
+ ## ...plus one of the following two methods of reloading:
+ #dns_server_reload_command: unbound-control reload
+ #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
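+ ## A minimal unbound.template might contain entries along these
+ ## lines (hypothetical content; see the real unbound.template in
+ ## this directory for the canonical example):
+ ##   local-data: "%{hostname}.%{uuid_prefix}.arvadosapi.com. IN A %{ip_address}"
+ ##   local-data-ptr: "%{ip_address} %{hostname}.%{uuid_prefix}.arvadosapi.com"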
compute_node_domain: false
compute_node_nameservers:
# Datamanager considers an unreferenced block older than this to be
# eligible for garbage collection. Therefore, it should never be
# smaller than the corresponding value used by any local keepstore
- # service (see keepstore -blob-signing-ttl flag). This rule prevents
- # datamanager from trying to garbage-collect recently written blocks
- # while clients are still holding valid signatures.
+ # service (see keepstore -blob-signature-ttl flag). This rule
+ # prevents datamanager from trying to garbage-collect recently
+ # written blocks while clients are still holding valid signatures.
#
# The default is 2 weeks.
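+ # (1209600 seconds = 14 days * 86400 seconds/day.)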
blob_signature_ttl: 1209600
# collection's replication_desired attribute is nil.
default_collection_replication: 2
- # Maximum size (in bytes) allowed for a single API request that will be
- # published in the discovery document for use by clients.
- # Note you must separately configure the upstream web server or proxy to
- # actually enforce the desired maximum request size on the server side.
+ # Maximum size (in bytes) allowed for a single API request. This
+ # limit is published in the discovery document for use by clients.
+ # Note: You must separately configure the upstream web server or
+ # proxy to actually enforce the desired maximum request size on the
+ # server side.
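+ # For example, an nginx proxy in front of the API server would need
+ # a matching "client_max_body_size 128m;" (128 MiB = 134217728
+ # bytes); other web servers have equivalent directives.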
max_request_size: 134217728
+ # Limit the number of bytes read from the database during an index
+ # request (by retrieving and returning fewer rows than would
+ # normally be returned in a single response).
+ # Note 1: This setting never reduces the number of returned rows to
+ # zero, no matter how big the first data row is.
+ # Note 2: Currently, this only limits the
+ # arvados.v1.collections.list API (GET /arvados/v1/collections), and
+ # only takes the size of manifest_text into account. Other fields
+ # (e.g., "properties" hashes) are not counted against this limit
+ # when returning collections, and the limit is not applied at all
+ # for other data types.
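+ # For example, with the 128 MiB default, a collections.list request
+ # whose matching manifests exceed that size will be returned across
+ # multiple responses; clients should keep paging (e.g., using the
+ # offset parameter) until all matching rows have been retrieved.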
+ max_index_database_read: 134217728
+
# When you run the db:delete_old_job_logs task, it will find jobs that
# have been finished for at least this many seconds, and delete their
# stderr logs from the logs table.
clean_job_log_rows_after: <%= 30.days %>
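+ # For example, to keep job stderr logs for 90 days instead:
+ #clean_job_log_rows_after: <%= 90.days %>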
+
+ # The maximum number of compute nodes that can be in use simultaneously.
+ # If this limit is reduced, any existing nodes with slot number >= new limit
+ # will not be counted against the new limit. In other words, the new limit
+ # won't be strictly enforced until those nodes with higher slot numbers
+ # go down.
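+ # For example, if the limit is lowered from 64 to 32 while a node
+ # occupies slot 40, that node is not counted against the new limit
+ # of 32 until it goes down.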
+ max_compute_nodes: 64
+
+ # Docker image to use when a job's runtime_constraints do not specify one.
+ default_docker_image_for_jobs: false
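+ # For example (illustrative image name only):
+ #default_docker_image_for_jobs: arvados/jobs:latest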