# silenced by throttling are not counted against this total.
crunch_limit_log_bytes_per_job: 67108864
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
# files or touch restart.txt (see below).
dns_server_conf_dir: false
# Template file for the dns server host snippets. See
# unbound.template in this directory for an example. If false, do
# not write any config files.
dns_server_conf_template: false
# String to write to {dns_server_conf_dir}/restart.txt (with a
# trailing newline) after updating local data. If false, do not
# open or write the restart.txt file.
dns_server_reload_command: false
# Command to run after each DNS update. Template variables will be
# substituted; see the "unbound" example below. If false, do not run
# a command.
dns_server_update_command: false

## Example for unbound:
#dns_server_conf_dir: /etc/unbound/conf.d
#dns_server_conf_template: /path/to/your/api/server/config/unbound.template
## ...plus one of the following two methods of reloading:
#dns_server_reload_command: unbound-control reload
#dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
compute_node_domain: false
compute_node_nameservers:
# Datamanager considers an unreferenced block older than this to be
# eligible for garbage collection. Therefore, it should never be
# smaller than the corresponding value used by any local keepstore
# service (see keepstore -blob-signature-ttl flag). This rule
# prevents datamanager from trying to garbage-collect recently
# written blocks while clients are still holding valid signatures.
#
# The default is 2 weeks.
blob_signature_ttl: 1209600
# collection's replication_desired attribute is nil.
default_collection_replication: 2
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
# Note: You must separately configure the upstream web server or
# proxy to actually enforce the desired maximum request size on the
# server side.
max_request_size: 134217728
# Limit the number of bytes read from the database during an index
# request (by retrieving and returning fewer rows than would
# normally be returned in a single response).
# Note 1: This setting never reduces the number of returned rows to
# zero, no matter how big the first data row is.
# Note 2: Currently, this only limits the
# arvados.v1.collections.list API (GET /arvados/v1/collections), and
# only takes the size of manifest_text into account. Other fields
# (e.g., "properties" hashes) are not counted against this limit
# when returning collections, and the limit is not applied at all
# for other data types.
max_index_database_read: 134217728
# When you run the db:delete_old_job_logs task, it will find jobs that
# have been finished for at least this many seconds, and delete their
# stderr logs from the logs table.
clean_job_log_rows_after: <%= 30.days %>

# The maximum number of compute nodes that can be in use simultaneously.
# If this limit is reduced, any existing nodes with slot number >= new limit
# will not be counted against the new limit. In other words, the new limit
# won't be strictly enforced until those nodes with higher slot numbers
# go down.
max_compute_nodes: 64

# Docker image to be used when none found in runtime_constraints of a job
default_docker_image_for_jobs: false

# Hostname to assign to a compute node when it sends a "ping" and the
# hostname in its Node record is nil.
# During bootstrapping, the "ping" script is expected to notice the
# hostname given in the ping response, and update its unix hostname
# accordingly.
# If false, leave the hostname alone (this is appropriate if your compute
# nodes' hostnames are already assigned by some other mechanism).
#
# One way or another, the hostnames of your node records should agree
# with your DNS records and your /etc/slurm-llnl/slurm.conf files.
#
# Example for compute0000, compute0001, ....:
# assign_node_hostname: compute%<slot_number>04d
# (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
assign_node_hostname: compute%<slot_number>d