# Copyright (C) The Arvados Authors. All rights reserved.
# SPDX-License-Identifier: AGPL-3.0
# Do not use this file for site configuration. Create
# /etc/arvados/config.yml instead.
# The order of precedence (highest to lowest):
# 1. Legacy component-specific config files (deprecated)
# 2. /etc/arvados/config.yml
# 3. config.defaults.yml
# Token to be included in all healthcheck requests. Disabled by default.
# Server expects request header of the format "Authorization: Bearer xxx"
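# For example, a monitoring probe could send a request like the
# following (host and token are illustrative placeholders, and the
# health endpoint path is an assumption based on typical setups):
#   curl -H "Authorization: Bearer xxxx" https://api.example.com/_health/ping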
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
# Note: You must separately configure the upstream web server or
# proxy to actually enforce the desired maximum request size on the
# server side.
MaxRequestSize: 134217728
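# For example, if nginx proxies the API server, a matching limit for
# the default value (134217728 bytes = 128 MiB) could be set with a
# directive like:
#   client_max_body_size 128m;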
# Limit the number of bytes read from the database during an index
# request (by retrieving and returning fewer rows than would
# normally be returned in a single response).
# Note 1: This setting never reduces the number of returned rows to
# zero, no matter how big the first data row is.
# Note 2: Currently, this is only checked against a specific set of
# columns that tend to get large (collections.manifest_text,
# containers.mounts, workflows.definition). Other fields (e.g.,
# "properties" hashes) are not counted against this limit.
MaxIndexDatabaseRead: 134217728
# Maximum number of items to return when responding to APIs that
# can return partial result sets using limit and offset parameters
# (e.g., *.index, groups.contents). If a request specifies a "limit"
# parameter higher than this value, this value is used instead.
MaxItemsPerResponse: 1000
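# For example, with the default of 1000, a hypothetical request like
#   GET /arvados/v1/collections?limit=100000
# returns at most 1000 items; a client pages through the rest with
# the "offset" parameter.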
# API methods to disable. Disabled methods are not listed in the
# discovery document, and respond 404 to all requests.
# Example: ["jobs.create", "pipeline_instances.create"]
DisabledAPIs: []
# Interval (seconds) between asynchronous permission view updates. Any
# permission-updating API called with the 'async' parameter schedules an
# update of the permission view in the future, if not already scheduled.
AsyncPermissionsUpdateInterval: 20
# Config parameters to automatically set up new users. If enabled,
# these users will be able to self-activate. Enable this if you want
# to run an open instance where anyone can create an account and use
# the system without requiring manual approval.
# The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
# auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
AutoSetupNewUsers: false
AutoSetupNewUsersWithVmUUID: ""
AutoSetupNewUsersWithRepository: false
AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
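# A sketch of enabling auto-setup in /etc/arvados/config.yml (the VM
# UUID shown is a made-up placeholder, not a default):
#   AutoSetupNewUsers: true
#   AutoSetupNewUsersWithVmUUID: zzzzz-2x53u-zzzzzzzzzzzzzzz
#   AutoSetupNewUsersWithRepository: true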
# When new_users_are_active is set to true, new users will be active
# immediately. This skips the "self-activate" step which enforces
# user agreements. Should only be enabled for development.
NewUsersAreActive: false
# The e-mail address of the user you would like to become marked as an admin
# user on their first login.
# In the default configuration, authentication happens through the Arvados SSO
# server, which uses OAuth2 against Google's servers, so in that case this
# should be an address associated with a Google account.
AutoAdminUserWithEmail: ""
# If auto_admin_first_user is set to true, the first user to log in when no
# other admin users exist will automatically become an admin user.
AutoAdminFirstUser: false
# Email address to notify whenever a user creates a profile for the
# first time.
UserProfileNotificationAddress: ""
AdminNotifierEmailFrom: arvados@example.com
EmailSubjectPrefix: "[ARVADOS] "
UserNotifierEmailFrom: arvados@example.com
NewUserNotificationRecipients: []
NewInactiveUserNotificationRecipients: []
# Time to keep audit logs, in seconds. (An audit log is a row added
# to the "logs" table in the PostgreSQL database each time an
# Arvados object is created, modified, or deleted.)
# Currently, websocket event notifications rely on audit logs, so
# this should not be set lower than 300 (5 minutes).
# Maximum number of log rows to delete in a single SQL transaction.
# If max_audit_log_delete_batch is 0, log entries will never be
# deleted by Arvados. Cleanup can be done by an external process
# without affecting any Arvados system processes, as long as very
# recent (<5 minutes old) logs are not deleted.
# 100000 is a reasonable batch size for most sites.
MaxDeleteBatch: 0
# Attributes to suppress in events and audit logs. Notably,
# specifying ["manifest_text"] here typically makes the database
# smaller and faster.
# Warning: Using any non-empty value here can have undesirable side
# effects for any client or component that relies on event logs.
# Use at your own risk.
UnloggedAttributes: []
# Maximum characters of (JSON-encoded) query parameters to include
# in each request log entry. When params exceed this size, they will
# be JSON-encoded, truncated to this size, and logged as
# params_truncated.
MaxRequestLogParamsSize: 2000
# Allow clients to create collections by providing a manifest with
# unsigned data blob locators. IMPORTANT: This effectively disables
# access controls for data stored in Keep: a client who knows a hash
# can write a manifest that references the hash, pass it to
# collections.create (which will create a permission link), use
# collections.get to obtain a signature for that data locator, and
# use that signed locator to retrieve the data from Keep. Therefore,
# do not turn this on if your users expect to keep data private from
# other users.
# blob_signing_key is a string of alphanumeric characters used to
# generate permission signatures for Keep locators. It must be
# identical to the permission key given to Keep. IMPORTANT: This is
# a site secret. It should be at least 50 characters.
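# One way to generate a suitable random key (a sketch using standard
# shell tools, not an official procedure):
#   tr -dc A-Za-z0-9 </dev/urandom | head -c50; echo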
# Modifying blob_signing_key will invalidate all existing
# signatures, which can cause programs to fail (e.g., arv-put,
# arv-get, and Crunch jobs). To avoid errors, rotate keys only when
# no such processes are running.
BlobSigningKey: ""
# Default replication level for collections. This is used when a
# collection's replication_desired attribute is nil.
DefaultReplication: 2
# Lifetime (in seconds) of blob permission signatures generated by
# the API server. This determines how long a client can take (after
# retrieving a collection record) to retrieve the collection data
# from Keep. If the client needs more time than that (assuming the
# collection still has the same content and the relevant user/token
# still has permission) the client can retrieve the collection again
# to get fresh signatures.
# This must be exactly equal to the -blob-signature-ttl flag used by
# keepstore servers. Otherwise, reading data blocks and saving
# collections will fail with HTTP 403 permission errors.
# Modifying blob_signature_ttl invalidates existing signatures; see
# blob_signing_key note above.
# The default is 2 weeks.
BlobSigningTTL: 1209600
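# (That is, 2 weeks = 14 * 24 * 3600 = 1209600 seconds.)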
# Default lifetime for ephemeral collections: 2 weeks. This must not
# be less than blob_signature_ttl.
DefaultTrashLifetime: 1209600
# Interval (seconds) between trash sweeps. During a trash sweep,
# collections are marked as trash if their trash_at time has
# arrived, and deleted if their delete_at time has arrived.
TrashSweepInterval: 60
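# For example, with the default of 60, a collection whose trash_at
# time has passed becomes trash at the next sweep, i.e., within at
# most about one minute.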
# If true, enable collection versioning.
# When a collection's preserve_version field is true, or the current version
# is older than the number of seconds defined in preserve_version_if_idle,
# a snapshot of the collection's previous state is created and linked to
# the current collection.
CollectionVersioning: false
# 0 = auto-create a new version on every update.
# -1 = never auto-create new versions.
# > 0 = auto-create a new version when older than the specified number of seconds.
PreserveVersionIfIdle: -1
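# For example, setting PreserveVersionIfIdle: 3600 would snapshot a
# collection's previous state on update only when the current
# version is more than an hour old (assuming CollectionVersioning is
# true).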
# These settings are provided by your OAuth2 provider (e.g.,
# sso-provider).
ProviderAppSecret: ""
ProviderAppID: ""
# Git repositories must be readable by the API server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# {git_repositories_dir}/arvados/.git
Repositories: /var/lib/arvados/git/repositories
# List of supported Docker Registry image formats that compute nodes
# are able to use. `arv keep docker` will error out if a user tries
# to store an image with an unsupported format. Use an empty array
# to skip the compatibility check (and display a warning message to
# the user instead).
# Example for sites running docker < 1.10: ["v1"]
# Example for sites running docker >= 1.10: ["v2"]
# Example for disabling check: []
SupportedDockerImageFormats: ["v2"]
# Include details about job reuse decisions in the server log. This
# causes additional database queries to run, so it should not be
# enabled unless you expect to examine the resulting logs for
# troubleshooting purposes.
LogReuseDecisions: false
# Default value for keep_cache_ram of a container's runtime_constraints.
DefaultKeepCacheRAM: 268435456
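# (268435456 bytes = 256 MiB of Keep cache per container by default.)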
# Number of times a container can be unlocked before being
# automatically cancelled.
MaxDispatchAttempts: 5
# Default value for container_count_max for container requests. This is the
# number of times Arvados will create a new container to satisfy a container
# request. If a container is cancelled, Arvados will retry with a new
# container as long as container_count < container_count_max on any container
# request associated with the cancelled container.
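# For example, if container_count_max is 3 and the first two
# containers are cancelled, one more container will be created
# before the container request itself fails.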
# The maximum number of compute nodes that can be in use simultaneously.
# If this limit is reduced, any existing nodes with slot number >= new limit
# will not be counted against the new limit. In other words, the new limit
# won't be strictly enforced until those nodes with higher slot numbers
# go down.
MaxComputeVMs: 64
# Preemptible instance support (e.g. AWS Spot Instances)
# When true, child containers will get created with the preemptible
# scheduling parameter set.
UsePreemptibleInstances: false
# When you run the db:delete_old_container_logs task, it will find
# containers that have been finished for at least this many seconds,
# and delete their stdout, stderr, arv-mount, crunch-run, and
# crunchstat logs from the logs table.
# These two settings control how frequently log events are flushed to the
# database. Log lines are buffered until either crunch_log_bytes_per_event
# has been reached or crunch_log_seconds_between_events has elapsed since
# the last flush.
LogBytesPerEvent: 4096
LogSecondsBetweenEvents: 1
# The sample period for throttling logs, in seconds.
LogThrottlePeriod: 60
# Maximum number of bytes that a job can log over crunch_log_throttle_period
# before being silenced until the end of the period.
LogThrottleBytes: 65536
# Maximum number of lines that a job can log over crunch_log_throttle_period
# before being silenced until the end of the period.
LogThrottleLines: 1024
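# Worked example with the defaults above: a job that writes 1 MiB of
# log output in a burst is silenced after the first 65536 bytes (or
# 1024 lines, whichever comes first) and resumes when the current
# 60-second sample period ends.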
# Maximum bytes that may be logged by a single job. Log bytes that are
# silenced by throttling are not counted against this total.
LimitLogBytesPerJob: 67108864
LogPartialLineThrottlePeriod: 5
# Container logs are written to Keep and saved in a collection,
# which is updated periodically while the container runs. This
# value sets the interval (given in seconds) between collection
# updates.
LogUpdatePeriod: 1800
# The log collection is also updated when the specified amount of
# log data (given in bytes) is produced in less than one update
# period.
LogUpdateSize: 33554432
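# With the defaults above, the log collection is updated every 1800
# seconds (30 minutes), or sooner whenever more than 33554432 bytes
# (32 MiB) of new log data accumulate within one period.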
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
# files or touch restart.txt (see below).
DNSServerConfDir: ""
# Template file for the dns server host snippets. See
# unbound.template in this directory for an example. If false, do
# not write any config files.
DNSServerConfTemplate: ""
# String to write to {dns_server_conf_dir}/restart.txt (with a
# trailing newline) after updating local data. If false, do not
# open or write the restart.txt file.
DNSServerReloadCommand: ""
# Command to run after each DNS update. Template variables will be
# substituted; see the "unbound" example below. If false, do not run
# a command.
DNSServerUpdateCommand: ""
ComputeNodeDomain: ""
ComputeNodeNameservers:
  - 192.168.1.1
# Hostname to assign to a compute node when it sends a "ping" and the
# hostname in its Node record is nil.
# During bootstrapping, the "ping" script is expected to notice the
# hostname given in the ping response, and update its unix hostname
# accordingly.
# If false, leave the hostname alone (this is appropriate if your compute
# nodes' hostnames are already assigned by some other mechanism).
# One way or another, the hostnames of your node records should agree
# with your DNS records and your /etc/slurm-llnl/slurm.conf files.
# Example for compute0000, compute0001, ....:
# assign_node_hostname: compute%<slot_number>04d
# (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
AssignNodeHostname: "compute%<slot_number>d"
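# With the default "compute%<slot_number>d", slot 9 yields
# "compute9"; the zero-padded variant "compute%<slot_number>04d"
# shown above yields "compute0009".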
# Enable the legacy Jobs API. This value must be a string.
# 'auto' -- (default) enable the Jobs API only if it has been used before
# (i.e., there are job records in the database)
# 'true' -- enable the Jobs API despite lack of existing records.
# 'false' -- disable the Jobs API despite presence of existing records.
# Git repositories must be readable by the API server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# {git_repositories_dir}/arvados/.git
GitInternalDir: /var/lib/arvados/internal.git
# Docker image to be used when none is found in runtime_constraints of a job
DefaultDockerImage: ""
# none or slurm_immediate
CrunchJobWrapper: none
# username, or false = do not set uid when running jobs.
CrunchJobUser: crunch
# The web service must be able to create/write this file, and
# crunch-job must be able to stat() it.
CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
# Control job reuse behavior when two completed jobs match the
# search criteria and have different outputs.
# If true, in case of a conflict, reuse the earliest job (this is
# similar to container reuse behavior).
# If false, in case of a conflict, do not reuse any completed job,
# but do reuse an already-running job if available (this is the
# original job reuse behavior, and is still the default).
ReuseJobIfOutputsDiffer: false
SendUserSetupNotificationEmail: ""
IssueReporterEmailFrom: ""
IssueReporterEmailTo: ""
SupportEmailAddress: ""