# Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: AGPL-3.0

# Do not use this file for site configuration. Create
# /etc/arvados/config.yml instead.
#
# The order of precedence (highest to lowest):
# 1. Legacy component-specific config files (deprecated)
# 2. /etc/arvados/config.yml
# 3. config.default.yml

# Token to be included in all healthcheck requests. Disabled by default.
# The server expects a request header of the form "Authorization: Bearer xxx".
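#
# As an illustration only (the endpoint path shown is an assumption, not
# defined by this file), a health probe using such a token might look like:
#   curl -H "Authorization: Bearer xxx" https://arvados.example.com/_health/ping
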
# Max concurrent connections per arvados server daemon.
ConnectionPool: 32

# All parameters here are passed to the PG client library in a connection string;
# see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
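#
# A minimal sketch of such connection settings (the "Connection" key name
# and all values are illustrative assumptions; any libpq parameter keyword
# from the page above may be used):
#   Connection:
#     host: localhost
#     dbname: arvados_production
#     user: arvados
#     password: xxxxxxxx
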
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
# Note: You must separately configure the upstream web server or
# proxy to actually enforce the desired maximum request size on the
# server side.
MaxRequestSize: 134217728

# Limit the number of bytes read from the database during an index
# request (by retrieving and returning fewer rows than would
# normally be returned in a single response).
# Note 1: This setting never reduces the number of returned rows to
# zero, no matter how big the first data row is.
# Note 2: Currently, this is only checked against a specific set of
# columns that tend to get large (collections.manifest_text,
# containers.mounts, workflows.definition). Other fields (e.g.,
# "properties" hashes) are not counted against this limit.
MaxIndexDatabaseRead: 134217728

# Maximum number of items to return when responding to APIs that
# can return partial result sets using limit and offset parameters
# (e.g., *.index, groups.contents). If a request specifies a "limit"
# parameter higher than this value, this value is used instead.
MaxItemsPerResponse: 1000

# API methods to disable. Disabled methods are not listed in the
# discovery document, and respond 404 to all requests.
# Example: ["jobs.create", "pipeline_instances.create"]
DisabledAPIs: []

# Interval (seconds) between asynchronous permission view updates. Any
# permission-updating API called with the 'async' parameter schedules an
# update on the permission view in the future, if not already scheduled.
AsyncPermissionsUpdateInterval: 20

# RailsSessionSecretToken is a string of alphanumeric characters
# used by Rails to sign session tokens. IMPORTANT: This is a
# site secret. It should be at least 50 characters.
RailsSessionSecretToken: ""
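
# One possible way to generate a suitable value (an assumption that any
# sufficiently long random alphanumeric string is acceptable, not a
# documented requirement of this file):
#   tr -dc A-Za-z0-9 </dev/urandom | head -c 50; echo
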
# Config parameters to automatically set up new users. If enabled,
# new users will be able to self-activate. Enable this if you want
# to run an open instance where anyone can create an account and use
# the system without requiring manual approval.
#
# The AutoSetupNewUsersWith* parameters are meaningful only when
# AutoSetupNewUsers is turned on. AutoSetupUsernameBlacklist is a list of
# usernames to be blacklisted for auto setup.
AutoSetupNewUsers: false
AutoSetupNewUsersWithVmUUID: ""
AutoSetupNewUsersWithRepository: false
AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
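
# For example, an open instance that also gives each new user a VM login
# and a git repository might use (illustrative values; the VM UUID shown
# is a made-up placeholder):
#   AutoSetupNewUsers: true
#   AutoSetupNewUsersWithVmUUID: zzzzz-2x53u-xxxxxxxxxxxxxxx
#   AutoSetupNewUsersWithRepository: true
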
# When NewUsersAreActive is set to true, new users will be active
# immediately. This skips the "self-activate" step which enforces
# user agreements. Should only be enabled for development.
NewUsersAreActive: false

# The e-mail address of the user who should automatically be marked as an
# admin user on their first login.
# In the default configuration, authentication happens through the Arvados SSO
# server, which uses OAuth2 against Google's servers, so in that case this
# should be an address associated with a Google account.
AutoAdminUserWithEmail: ""

# If AutoAdminFirstUser is set to true, the first user to log in when no
# other admin users exist will automatically become an admin user.
AutoAdminFirstUser: false

# Email address to notify whenever a user creates a profile for the
# first time.
UserProfileNotificationAddress: ""

AdminNotifierEmailFrom: arvados@example.com
EmailSubjectPrefix: "[ARVADOS] "
UserNotifierEmailFrom: arvados@example.com
NewUserNotificationRecipients: []
NewInactiveUserNotificationRecipients: []

# Time to keep audit logs, in seconds. (An audit log is a row added
# to the "logs" table in the PostgreSQL database each time an
# Arvados object is created, modified, or deleted.)
#
# Currently, websocket event notifications rely on audit logs, so
# this should not be set lower than 600 (10 minutes).
MaxAuditLogAge: 1209600

# Maximum number of log rows to delete in a single SQL transaction.
#
# If MaxAuditLogDeleteBatch is 0, log entries will never be
# deleted by Arvados. Cleanup can be done by an external process
# without affecting any Arvados system processes, as long as very
# recent (<5 minutes old) logs are not deleted.
#
# 100000 is a reasonable batch size for most sites.
MaxAuditLogDeleteBatch: 0

# Attributes to suppress in events and audit logs. Notably,
# specifying ["manifest_text"] here typically makes the database
# smaller and faster.
#
# Warning: Using any non-empty value here can have undesirable side
# effects for any client or component that relies on event logs.
# Use at your own risk.
UnloggedAttributes: []
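
# For example, to omit collection manifests from event and audit logs
# (per the note above):
#   UnloggedAttributes: ["manifest_text"]
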
# Maximum characters of (JSON-encoded) query parameters to include
# in each request log entry. When params exceed this size, they will
# be JSON-encoded, truncated to this size, and logged as
# params_truncated.
MaxRequestLogParamsSize: 2000

# Allow clients to create collections by providing a manifest with
# unsigned data blob locators. IMPORTANT: This effectively disables
# access controls for data stored in Keep: a client who knows a hash
# can write a manifest that references the hash, pass it to
# collections.create (which will create a permission link), use
# collections.get to obtain a signature for that data locator, and
# use that signed locator to retrieve the data from Keep. Therefore,
# do not turn this on if your users expect to keep data private from
# one another!

# BlobSigningKey is a string of alphanumeric characters used to
# generate permission signatures for Keep locators. It must be
# identical to the permission key given to Keep. IMPORTANT: This is
# a site secret. It should be at least 50 characters.
#
# Modifying BlobSigningKey will invalidate all existing
# signatures, which can cause programs to fail (e.g., arv-put,
# arv-get, and Crunch jobs). To avoid errors, rotate keys only when
# no such processes are running.
BlobSigningKey: ""

# Default replication level for collections. This is used when a
# collection's replication_desired attribute is nil.
DefaultReplication: 2

# Lifetime (in seconds) of blob permission signatures generated by
# the API server. This determines how long a client can take (after
# retrieving a collection record) to retrieve the collection data
# from Keep. If the client needs more time than that (assuming the
# collection still has the same content and the relevant user/token
# still has permission), the client can retrieve the collection again
# to get fresh signatures.
#
# This must be exactly equal to the -blob-signature-ttl flag used by
# keepstore servers. Otherwise, reading data blocks and saving
# collections will fail with HTTP 403 permission errors.
#
# Modifying BlobSigningTTL invalidates existing signatures; see
# the BlobSigningKey note above.
#
# The default is 2 weeks.
BlobSigningTTL: 1209600
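
# As a sketch only (exact keepstore flag syntax may vary by version; the
# flag name comes from the note above, and the value here is in seconds),
# a keepstore that matches the default above might be started with:
#   keepstore -blob-signature-ttl=1209600 [other flags]
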
# Default lifetime for ephemeral collections: 2 weeks. This must not
# be less than BlobSigningTTL.
DefaultTrashLifetime: 1209600

# Interval (seconds) between trash sweeps. During a trash sweep,
# collections are marked as trash if their trash_at time has
# arrived, and deleted if their delete_at time has arrived.
TrashSweepInterval: 60

# If true, enable collection versioning.
# When a collection's preserve_version field is true, or the current version
# is older than the number of seconds defined in PreserveVersionIfIdle,
# a snapshot of the collection's previous state is created and linked to
# the current collection.
CollectionVersioning: false

# 0 = auto-create a new version on every update.
# -1 = never auto-create new versions.
# > 0 = auto-create a new version when the current one is older than the
# specified number of seconds.
PreserveVersionIfIdle: -1
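
# For example, to snapshot a collection on update only when the current
# version is more than one hour old (illustrative values):
#   CollectionVersioning: true
#   PreserveVersionIfIdle: 3600
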
# These settings are provided by your OAuth2 provider (e.g.,
# sso-provider).
ProviderAppSecret: ""
ProviderAppID: ""

# Git repositories must be readable by the API server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# {git_repositories_dir}/arvados/.git
Repositories: /var/lib/arvados/git/repositories

# List of supported Docker Registry image formats that compute nodes
# are able to use. `arv keep docker` will error out if a user tries
# to store an image with an unsupported format. Use an empty array
# to skip the compatibility check (and display a warning message to
# the user instead).
#
# Example for sites running docker < 1.10: ["v1"]
# Example for sites running docker >= 1.10: ["v2"]
# Example for disabling check: []
SupportedDockerImageFormats: ["v2"]

# Include details about job reuse decisions in the server log. This
# causes additional database queries to run, so it should not be
# enabled unless you expect to examine the resulting logs for
# troubleshooting purposes.
LogReuseDecisions: false

# Default value for keep_cache_ram of a container's runtime_constraints.
DefaultKeepCacheRAM: 268435456

# Number of times a container can be unlocked before being
# automatically cancelled.
MaxDispatchAttempts: 5

# Default value for container_count_max for container requests. This is the
# number of times Arvados will create a new container to satisfy a container
# request. If a container is cancelled, a new container will be created for
# any associated container request whose container_count is still below
# container_count_max.
MaxRetryAttempts: 3

# The maximum number of compute nodes that can be in use simultaneously.
# If this limit is reduced, any existing nodes with slot number >= new limit
# will not be counted against the new limit. In other words, the new limit
# won't be strictly enforced until those nodes with higher slot numbers
# go down.
MaxComputeVMs: 64

# Preemptible instance support (e.g. AWS Spot Instances).
# When true, child containers will get created with the preemptible
# scheduling parameter set.
UsePreemptibleInstances: false

# When you run the db:delete_old_container_logs task, it will find
# containers that have been finished for at least this many seconds,
# and delete their stdout, stderr, arv-mount, crunch-run, and
# crunchstat logs from the logs table.

# These two settings control how frequently log events are flushed to the
# database. Log lines are buffered until either LogBytesPerEvent
# has been reached or LogSecondsBetweenEvents has elapsed since
# the last flush.
LogBytesPerEvent: 4096
LogSecondsBetweenEvents: 1

# The sample period for throttling logs, in seconds.
LogThrottlePeriod: 60

# Maximum number of bytes that a job can log over LogThrottlePeriod
# before being silenced until the end of the period.
LogThrottleBytes: 65536

# Maximum number of lines that a job can log over LogThrottlePeriod
# before being silenced until the end of the period.
LogThrottleLines: 1024

# Maximum bytes that may be logged by a single job. Log bytes that are
# silenced by throttling are not counted against this total.
LimitLogBytesPerJob: 67108864
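
# Worked example with the defaults above: within any 60-second
# LogThrottlePeriod, a job may log at most 65536 bytes and 1024 lines;
# output beyond either limit is silenced until the period ends, and at
# most 67108864 bytes (64 MiB) are kept for the job overall.
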
LogPartialLineThrottlePeriod: 5

# Container logs are written to Keep and saved in a collection,
# which is updated periodically while the container runs. This
# value sets the interval (given in seconds) between collection
# updates.
LogUpdatePeriod: 1800

# The log collection is also updated when the specified amount of
# log data (given in bytes) is produced in less than one update
# period.
LogUpdateSize: 33554432

# Path to the DNS server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
# files or touch restart.txt (see below).
DNSServerConfDir: ""

# Template file for the DNS server host snippets. See
# unbound.template in this directory for an example. If false, do
# not write any config files.
DNSServerConfTemplate: ""

# String to write to {dns_server_conf_dir}/restart.txt (with a
# trailing newline) after updating local data. If false, do not
# open or write the restart.txt file.
DNSServerReloadCommand: ""

# Command to run after each DNS update. Template variables will be
# substituted; see the "unbound" example below. If false, do not run
# the command.
DNSServerUpdateCommand: ""

ComputeNodeDomain: ""
ComputeNodeNameservers:
  - 192.168.1.1

# Hostname to assign to a compute node when it sends a "ping" and the
# hostname in its Node record is nil.
# During bootstrapping, the "ping" script is expected to notice the
# hostname given in the ping response, and update its unix hostname
# accordingly.
#
# If false, leave the hostname alone (this is appropriate if your compute
# nodes' hostnames are already assigned by some other mechanism).
#
# One way or another, the hostnames of your node records should agree
# with your DNS records and your /etc/slurm-llnl/slurm.conf files.
#
# Example for compute0000, compute0001, ...:
# AssignNodeHostname: "compute%<slot_number>04d"
# (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
AssignNodeHostname: "compute%<slot_number>d"
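
# With the default format above, slot number 9 becomes "compute9"; with
# the zero-padded example format, it becomes "compute0009".
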
# Enable the legacy Jobs API. This value must be a string.
#
# 'auto' -- (default) enable the Jobs API only if it has been used before
# (i.e., there are job records in the database)
# 'true' -- enable the Jobs API despite lack of existing records.
# 'false' -- disable the Jobs API despite presence of existing records.
Enable: 'auto'

# Git repositories must be readable by the API server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# {git_repositories_dir}/arvados/.git
GitInternalDir: /var/lib/arvados/internal.git

# Docker image to be used when none is found in runtime_constraints of a job.
DefaultDockerImage: ""

# none or slurm_immediate
CrunchJobWrapper: none

# username, or false = do not set uid when running jobs.
CrunchJobUser: crunch

# The web service must be able to create/write this file, and
# crunch-job must be able to stat() it.
CrunchRefreshTrigger: /tmp/crunch_refresh_trigger

# Control job reuse behavior when two completed jobs match the
# search criteria and have different outputs.
#
# If true, in case of a conflict, reuse the earliest job (this is
# similar to container reuse behavior).
#
# If false, in case of a conflict, do not reuse any completed job,
# but do reuse an already-running job if available (this is the
# original job reuse behavior, and is still the default).
ReuseJobIfOutputsDiffer: false

SendUserSetupNotificationEmail: ""
IssueReporterEmailFrom: ""
IssueReporterEmailTo: ""
SupportEmailAddress: ""