// Copyright (C) The Arvados Authors. All rights reserved.
// SPDX-License-Identifier: AGPL-3.0
var DefaultYAML = []byte(`# Copyright (C) The Arvados Authors. All rights reserved.
# SPDX-License-Identifier: AGPL-3.0
# Do not use this file for site configuration. Create
# /etc/arvados/config.yml instead.
# The order of precedence (highest to lowest):
# 1. Legacy component-specific config files (deprecated)
# 2. /etc/arvados/config.yml
# 3. config.default.yml
# Token to be included in all healthcheck requests. Disabled by default.
# The server expects a request header of the form "Authorization: Bearer xxx"
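# For example, a health check probe might look like this (illustrative
# hostname and token; /_health/ping is the standard health endpoint path):
#   curl -H "Authorization: Bearer xxxxxxxx" https://controller.example.com/_health/ping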
# Base URL for Workbench inline preview. If blank, use
# WebDAVDownload instead, and disable inline preview.
# If both are empty, downloading collections from workbench
# will not be possible.

# It is important to properly configure the download service
# to mitigate cross-site-scripting (XSS) attacks. An HTML page
# can be stored in a collection. If an attacker causes a victim
# to visit that page through Workbench, it will be rendered by
# the browser. If all collections are served at the same
# domain, the browser will consider collections as coming from
# the same origin and having access to the same browsing data,
# enabling malicious Javascript on that page to access Arvados
# on behalf of the victim.

# This can be mitigated by having separate domains for each
# collection, or limiting preview to circumstances where the
# collection is not accessed with the user's regular
# credentials.

# Serve preview links using uuid or pdh in subdomain
# (requires wildcard DNS and TLS certificate)
#   https://*.collections.uuid_prefix.arvadosapi.com

# Serve preview links using uuid or pdh in main domain
# (requires wildcard DNS and TLS certificate)
#   https://*--collections.uuid_prefix.arvadosapi.com

# Serve preview links by setting uuid or pdh in the path.
# This configuration only allows previews of public data or
# collection-sharing links, because these use the anonymous
# user token or the token is already embedded in the URL.
# Other data must be handled as downloads via WebDAVDownload:
#   https://collections.uuid_prefix.arvadosapi.com

# Base URL for download links. If blank, serve links to WebDAV
# with disposition=attachment query param. Unlike preview links,
# browsers do not render attachments, so there is no risk of XSS.

# If WebDAVDownload is blank, and WebDAV uses a
# single-origin form, then Workbench will show an error page.

# Serve download links by setting uuid or pdh in the path:
#   https://download.uuid_prefix.arvadosapi.com
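# A typical pairing of wildcard preview hosts with a single download
# host might look like this (illustrative hostnames; ExternalURL is
# the usual per-service setting):
#   WebDAV:
#     ExternalURL: https://*.collections.uuid_prefix.arvadosapi.com/
#   WebDAVDownload:
#     ExternalURL: https://download.uuid_prefix.arvadosapi.com/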
# ShellInABox service endpoint URL for a given VM. If empty, do not
# offer web shell logins.

# E.g., using a path-based proxy server to forward connections to shell hosts:
#   https://webshell.uuid_prefix.arvadosapi.com

# E.g., using a name-based proxy server to forward connections to shell hosts:
#   https://*.webshell.uuid_prefix.arvadosapi.com

# Maximum concurrent connections per Arvados server daemon.

# All parameters here are passed to the PG client library in a connection string;
# see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
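# For example, a typical connection section might look like this
# (illustrative values; the parameter names are standard libpq keywords):
#   Connection:
#     host: localhost
#     dbname: arvados_production
#     user: arvados
#     password: xxxxxxxx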
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
# Note: You must separately configure the upstream web server or
# proxy to actually enforce the desired maximum request size on the
# server side.
MaxRequestSize: 134217728

# Limit the number of bytes read from the database during an index
# request (by retrieving and returning fewer rows than would
# normally be returned in a single response).
# Note 1: This setting never reduces the number of returned rows to
# zero, no matter how big the first data row is.
# Note 2: Currently, this is only checked against a specific set of
# columns that tend to get large (collections.manifest_text,
# containers.mounts, workflows.definition). Other fields (e.g.,
# "properties" hashes) are not counted against this limit.
MaxIndexDatabaseRead: 134217728

# Maximum number of items to return when responding to APIs that
# can return partial result sets using limit and offset parameters
# (e.g., *.index, groups.contents). If a request specifies a "limit"
# parameter higher than this value, this value is used instead.
MaxItemsPerResponse: 1000
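# For example, with the default of 1000, a groups.contents request
# sent with limit=5000 will return at most 1000 items per response;
# clients are expected to page through the rest using the offset
# parameter.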
# API methods to disable. Disabled methods are not listed in the
# discovery document, and respond 404 to all requests.
# Example: ["jobs.create", "pipeline_instances.create"]

# Interval (seconds) between asynchronous permission view updates. Any
# permission-updating API called with the 'async' parameter schedules an
# update on the permission view in the future, if not already scheduled.
AsyncPermissionsUpdateInterval: 20s

# Maximum number of concurrent outgoing requests to make while
# serving a single incoming multi-cluster (federated) request.
MaxRequestAmplification: 4

# RailsSessionSecretToken is a string of alphanumeric characters
# used by Rails to sign session tokens. IMPORTANT: This is a
# site secret. It should be at least 50 characters.
RailsSessionSecretToken: ""
# Maximum wall clock time to spend handling an incoming request.

# Config parameters to automatically setup new users. If enabled,
# these users will be able to self-activate. Enable this if you want
# to run an open instance where anyone can create an account and use
# the system without requiring manual approval.

# The AutoSetupNewUsersWith* parameters are meaningful only when AutoSetupNewUsers is turned on.
# AutoSetupUsernameBlacklist is a list of usernames to be blacklisted for auto setup.
AutoSetupNewUsers: false
AutoSetupNewUsersWithVmUUID: ""
AutoSetupNewUsersWithRepository: false
AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]

# When NewUsersAreActive is set to true, new users will be active
# immediately. This skips the "self-activate" step which enforces
# user agreements. Should only be enabled for development.
NewUsersAreActive: false

# The e-mail address of the user who should automatically be marked
# as an admin user on their first login.
# In the default configuration, authentication happens through the Arvados SSO
# server, which uses OAuth2 against Google's servers, so in that case this
# should be an address associated with a Google account.
AutoAdminUserWithEmail: ""

# If AutoAdminFirstUser is set to true, the first user to log in when no
# other admin users exist will automatically become an admin user.
AutoAdminFirstUser: false

# Email address to notify whenever a user creates a profile for the
# first time.
UserProfileNotificationAddress: ""
AdminNotifierEmailFrom: arvados@example.com
EmailSubjectPrefix: "[ARVADOS] "
UserNotifierEmailFrom: arvados@example.com
NewUserNotificationRecipients: []
NewInactiveUserNotificationRecipients: []

# Set AnonymousUserToken to enable anonymous user access. You can get
# the token by running "bundle exec ./script/get_anonymous_user_token.rb"
# in the directory where your API server is running.
AnonymousUserToken: ""
# Time to keep audit logs, in seconds. (An audit log is a row added
# to the "logs" table in the PostgreSQL database each time an
# Arvados object is created, modified, or deleted.)

# Currently, websocket event notifications rely on audit logs, so
# this should not be set lower than 300 (5 minutes).

# Maximum number of log rows to delete in a single SQL transaction.

# If max_audit_log_delete_batch is 0, log entries will never be
# deleted by Arvados. Cleanup can be done by an external process
# without affecting any Arvados system processes, as long as very
# recent (<5 minutes old) logs are not deleted.

# 100000 is a reasonable batch size for most sites.

# Attributes to suppress in events and audit logs. Notably,
# specifying ["manifest_text"] here typically makes the database
# smaller and faster.

# Warning: Using any non-empty value here can have undesirable side
# effects for any client or component that relies on event logs.
# Use at your own risk.
UnloggedAttributes: []
# Logging threshold: panic, fatal, error, warn, info, debug, or trace.

# Logging format: json or text

# Maximum characters of (JSON-encoded) query parameters to include
# in each request log entry. When params exceed this size, they will
# be JSON-encoded, truncated to this size, and logged as params_truncated.
MaxRequestLogParamsSize: 2000
# Allow clients to create collections by providing a manifest with
# unsigned data blob locators. IMPORTANT: This effectively disables
# access controls for data stored in Keep: a client who knows a hash
# can write a manifest that references the hash, pass it to
# collections.create (which will create a permission link), use
# collections.get to obtain a signature for that data locator, and
# use that signed locator to retrieve the data from Keep. Therefore,
# do not turn this on if your users expect to keep data private from
# each other.

# blob_signing_key is a string of alphanumeric characters used to
# generate permission signatures for Keep locators. It must be
# identical to the permission key given to Keep. IMPORTANT: This is
# a site secret. It should be at least 50 characters.

# Modifying blob_signing_key will invalidate all existing
# signatures, which can cause programs to fail (e.g., arv-put,
# arv-get, and Crunch jobs). To avoid errors, rotate keys only when
# no such processes are running.
# Default replication level for collections. This is used when a
# collection's replication_desired attribute is nil.
DefaultReplication: 2

# Lifetime (in seconds) of blob permission signatures generated by
# the API server. This determines how long a client can take (after
# retrieving a collection record) to retrieve the collection data
# from Keep. If the client needs more time than that (assuming the
# collection still has the same content and the relevant user/token
# still has permission) the client can retrieve the collection again
# to get fresh signatures.

# This must be exactly equal to the -blob-signature-ttl flag used by
# keepstore servers. Otherwise, reading data blocks and saving
# collections will fail with HTTP 403 permission errors.

# Modifying blob_signature_ttl invalidates existing signatures; see
# blob_signing_key note above.

# The default is 2 weeks.

# Default lifetime for ephemeral collections: 2 weeks. This must not
# be less than blob_signature_ttl.
DefaultTrashLifetime: 336h
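# Duration values like this one use Go-style duration suffixes (h, m, s).
# For example, 336h is 14 days; 1209600s or 20160m express the same span.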
# Interval (seconds) between trash sweeps. During a trash sweep,
# collections are marked as trash if their trash_at time has
# arrived, and deleted if their delete_at time has arrived.
TrashSweepInterval: 60s

# If true, enable collection versioning.
# When a collection's preserve_version field is true or the current version
# is older than the number of seconds defined in PreserveVersionIfIdle,
# a snapshot of the collection's previous state is created and linked to
# the current collection.
CollectionVersioning: false

#   0s = auto-create a new version on every update.
#  -1s = never auto-create new versions.
# > 0s = auto-create a new version when older than the specified number of seconds.
PreserveVersionIfIdle: -1s
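# For example, PreserveVersionIfIdle: 3600s (an illustrative value)
# would snapshot the previous state on update only when the current
# version is more than an hour old.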
# Managed collection properties. At creation time, if the client didn't
# provide the listed keys, they will be automatically populated following
# one of these behaviors:

# * UUID of the user who owns the containing project.
#   responsible_person_uuid: {Function: original_owner, Protected: true}

# * Default concrete value.
#   foo_bar: {Value: baz, Protected: false}

# If Protected is true, only an admin user can modify its value.
SAMPLE: {Function: original_owner, Protected: true}

# In "trust all content" mode, Workbench will redirect download
# requests to the WebDAV preview link, even in the cases when
# WebDAV would have to expose XSS vulnerabilities in order to
# handle the redirect (see discussion on Services.WebDAV).

# This setting has no effect in the recommended configuration,
# where WebDAV is configured to have a separate domain for
# every collection; in this case XSS protection is provided by
# browsers' same-origin policy.

# The default setting (false) is appropriate for a multi-user site.
TrustAllContent: false
# These settings are provided by your OAuth2 provider (e.g.,
# Google).
ProviderAppSecret: ""

# Git repositories must be readable by the API server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# {git_repositories_dir}/arvados/.git
Repositories: /var/lib/arvados/git/repositories

# List of supported Docker Registry image formats that compute nodes
# are able to use. ` + "`" + `arv keep docker` + "`" + ` will error out if a user tries
# to store an image with an unsupported format. Use an empty array
# to skip the compatibility check (and display a warning message to
# that effect).

# Example for sites running docker < 1.10: ["v1"]
# Example for sites running docker >= 1.10: ["v2"]
# Example for disabling check: []
SupportedDockerImageFormats: ["v2"]
# Include details about job reuse decisions in the server log. This
# causes additional database queries to run, so it should not be
# enabled unless you expect to examine the resulting logs for
# troubleshooting purposes.
LogReuseDecisions: false

# Default value for keep_cache_ram of a container's runtime_constraints.
DefaultKeepCacheRAM: 268435456

# Number of times a container can be unlocked before being
# automatically cancelled.
MaxDispatchAttempts: 5

# Default value for container_count_max for container requests. This is the
# number of times Arvados will create a new container to satisfy a container
# request. If a container is cancelled, a new container will be created if
# container_count < container_count_max on any container request associated
# with the cancelled container.

# The maximum number of compute nodes that can be in use simultaneously.
# If this limit is reduced, any existing nodes with slot number >= new limit
# will not be counted against the new limit. In other words, the new limit
# won't be strictly enforced until those nodes with higher slot numbers
# go away.

# Preemptible instance support (e.g. AWS Spot Instances).
# When true, child containers will get created with the preemptible
# scheduling parameter set.
UsePreemptibleInstances: false

# PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
# (experimental) cloud dispatcher for executing containers on
# worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
# and ends with "\n-----END RSA PRIVATE KEY-----\n".
DispatchPrivateKey: none
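# For example, the key can be supplied inline as a YAML block scalar
# (illustrative; the key material itself is elided here):
#   DispatchPrivateKey: |
#     -----BEGIN RSA PRIVATE KEY-----
#     ...
#     -----END RSA PRIVATE KEY-----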
# Maximum time to wait for workers to come up before abandoning
# stale locks from a previous dispatch process.

# When you run the db:delete_old_container_logs task, it will find
# containers that have been finished for at least this many seconds,
# and delete their stdout, stderr, arv-mount, crunch-run, and
# crunchstat logs from the logs table.

# These two settings control how frequently log events are flushed to the
# database. Log lines are buffered until either LogBytesPerEvent
# has been reached or LogSecondsBetweenEvents has elapsed since
# the last flush.
LogBytesPerEvent: 4096
LogSecondsBetweenEvents: 1

# The sample period for throttling logs.
LogThrottlePeriod: 60s

# Maximum number of bytes that a job can log over LogThrottlePeriod
# before being silenced until the end of the period.
LogThrottleBytes: 65536

# Maximum number of lines that a job can log over LogThrottlePeriod
# before being silenced until the end of the period.
LogThrottleLines: 1024

# Maximum bytes that may be logged by a single job. Log bytes that are
# silenced by throttling are not counted against this total.
LimitLogBytesPerJob: 67108864

LogPartialLineThrottlePeriod: 5s

# Container logs are written to Keep and saved in a
# collection, which is updated periodically while the
# container runs. This value sets the interval between
# collection updates.

# The log collection is also updated when the specified amount of
# log data (given in bytes) is produced in less than one update
# interval.
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
# files or touch restart.txt (see below).

# Template file for the dns server host snippets. See
# unbound.template in this directory for an example. If false, do
# not write any config files.
DNSServerConfTemplate: ""

# String to write to {dns_server_conf_dir}/restart.txt (with a
# trailing newline) after updating local data. If false, do not
# open or write the restart.txt file.
DNSServerReloadCommand: ""

# Command to run after each DNS update. Template variables will be
# substituted; see the "unbound" example below. If false, do not run
# any command.
DNSServerUpdateCommand: ""

ComputeNodeDomain: ""
ComputeNodeNameservers:

# Hostname to assign to a compute node when it sends a "ping" and the
# hostname in its Node record is nil.
# During bootstrapping, the "ping" script is expected to notice the
# hostname given in the ping response, and update its unix hostname
# accordingly.

# If false, leave the hostname alone (this is appropriate if your compute
# nodes' hostnames are already assigned by some other mechanism).

# One way or another, the hostnames of your node records should agree
# with your DNS records and your /etc/slurm-llnl/slurm.conf files.

# Example for compute0000, compute0001, ...:
#   AssignNodeHostname: compute%<slot_number>04d
# (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
AssignNodeHostname: "compute%<slot_number>d"
# Enable the legacy Jobs API. This value must be a string.
# 'auto' -- (default) enable the Jobs API only if it has been used before
#           (i.e., there are job records in the database)
# 'true' -- enable the Jobs API despite lack of existing records.
# 'false' -- disable the Jobs API despite presence of existing records.

# Git repositories must be readable by the API server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# {git_repositories_dir}/arvados/.git
GitInternalDir: /var/lib/arvados/internal.git

# Docker image to be used when none is found in runtime_constraints of a job.
DefaultDockerImage: ""

# none or slurm_immediate
CrunchJobWrapper: none

# username, or false = do not set uid when running jobs.
CrunchJobUser: crunch

# The web service must be able to create/write this file, and
# crunch-job must be able to stat() it.
CrunchRefreshTrigger: /tmp/crunch_refresh_trigger

# Control job reuse behavior when two completed jobs match the
# search criteria and have different outputs.

# If true, in case of a conflict, reuse the earliest job (this is
# similar to container reuse behavior).

# If false, in case of a conflict, do not reuse any completed job,
# but do reuse an already-running job if available (this is the
# original job reuse behavior, and is still the default).
ReuseJobIfOutputsDiffer: false
# Enable the cloud scheduler (experimental).

# Name/number of port where workers' SSH services listen.

# Interval between queue polls.

# Shell command to execute on each worker to determine whether
# the worker is booted and ready to run containers. It should
# exit zero if the worker is ready.
BootProbeCommand: "docker ps -q"

# Minimum interval between consecutive probes to a single
# worker.

# Maximum probes per second, across all workers in a pool.
MaxProbesPerSecond: 10

# Time before repeating SIGTERM when killing a container.

# Time to give up on SIGTERM and write off the worker.

# Maximum create/destroy-instance operations per second (0 =
# unlimited).
MaxCloudOpsPerSecond: 0

# Interval between cloud provider syncs/updates ("list all
# instances").

# Time to leave an idle worker running (in case new containers
# appear in the queue that it can run) before shutting it
# down.

# Time to wait for a new worker to boot (i.e., pass
# BootProbeCommand) before giving up and shutting it down.

# Maximum time a worker can stay alive with no successful
# probes before being automatically shut down.

# Time after shutting down a worker to retry the
# shutdown/destroy operation.

# Worker VM image ID.

# Tags to add on all resources (VMs, NICs, disks) created by
# the container dispatcher. (Arvados's own tags --
# InstanceType, IdleBehavior, and InstanceSecret -- will also
# be added.)

# Prefix for predefined tags used by Arvados (InstanceSetID,
# InstanceType, InstanceSecret, IdleBehavior). With the
# default value "Arvados", tags are "ArvadosInstanceSetID",
# "ArvadosInstanceSecret", etc.

# This should only be changed while no cloud resources are in
# use and the cloud dispatcher is not running. Otherwise,
# VMs/resources that were added using the old tag prefix will
# need to be detected and cleaned up manually.
TagKeyPrefix: Arvados
# Cloud driver: "azure" (Microsoft Azure) or "ec2" (Amazon AWS).

# Cloud-specific driver parameters.

# (ec2) Instance configuration.
AdminUsername: debian

# (azure) Credentials.

# (azure) Instance configuration.
CloudEnvironment: AzurePublicCloud
DeleteDanglingResourcesAfter: 20s
AdminUsername: arvados

# Use the instance type name as the key (in place of "SAMPLE" in
# this sample entry).

# Cloud provider's instance type. Defaults to the configured type name.
IncludedScratch: 16GB
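# For example, a complete entry might look like this (illustrative
# values; VCPUs, RAM, and Price are the usual companion fields to
# ProviderType and IncludedScratch):
#   m4.large:
#     ProviderType: m4.large
#     VCPUs: 2
#     RAM: 8GiB
#     IncludedScratch: 32GB
#     Price: 0.1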
SendUserSetupNotificationEmail: true

# Bug/issue report notification to and from addresses
IssueReporterEmailFrom: "arvados@example.com"
IssueReporterEmailTo: "arvados@example.com"
SupportEmailAddress: "arvados@example.com"

# Generic issue email from
EmailFrom: "arvados@example.com"

# API endpoint host or host:port; default is {id}.arvadosapi.com
Host: sample.arvadosapi.com

# Perform a proxy request when a local client requests an
# object belonging to this remote.

# Default "https". Can be set to "http" for testing.

# Disable TLS verify. Can be set to true for testing.

# When users present tokens issued by this remote cluster, and
# their accounts are active on the remote cluster, activate
# them on this cluster too.
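# For example, a remote cluster entry might look like this
# (illustrative cluster ID "zzzzz"; Proxy, Scheme, Insecure, and
# ActivateUsers are the settings described above):
#   zzzzz:
#     Host: zzzzz.arvadosapi.com
#     Proxy: true
#     Scheme: https
#     Insecure: false
#     ActivateUsers: true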
ActivationContactLink: mailto:info@arvados.org
ArvadosDocsite: https://doc.arvados.org
ArvadosPublicDataDocURL: https://playground.arvados.org/projects/public
ShowUserAgreementInline: false

# Scratch directory used by the remote repository browsing
# feature. If it doesn't exist, it (and any missing parents) will be
# created using mkdir_p.
RepositoryCache: /var/www/arvados-workbench/current/tmp/git

# Below is a sample setting of the UserProfileFormFields config parameter.
# This configuration parameter should be set to either false (to disable) or
# to a map as shown below.
# Configure the map of input fields to be displayed in the profile page
# using the attribute "key" for each of the input fields.
# This sample shows a configuration with one required and one optional form field.
# For each of these input fields:
#   You can specify "Type" as "text" or "select".
#   List the "Options" to be displayed for each "select" menu.
#   Set "Required" as "true" for any of these fields to make them required.
# If any of the required fields are missing in the user's profile, the user will be
# redirected to the profile page before they can access any Workbench features.
UserProfileFormFields: {}
#   exampleTextValue:  # key that will be set in properties
#     FormFieldDescription: ""
#   exampleOptionsValue:
#     FormFieldDescription: ""

# Use "UserProfileFormMessage" to configure the message you want
# to display on the profile page.
UserProfileFormMessage: 'Welcome to Arvados. All <span style="color:red">required fields</span> must be completed before you can proceed.'

# Mimetypes of applications for which the view icon
# would be enabled in a collection's show page.
# It is sufficient to list only applications here.
# No need to list text and image types.
ApplicationMimetypesWithViewIcon:
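# For example, entries use the mimetype name as the key
# (illustrative selection):
#   json: {}
#   pdf: {}
#   xml: {}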
# The maximum number of bytes to load in the log viewer.
LogViewerMaxBytes: 1M

# When AnonymousUserToken is configured, show the public projects page.
EnablePublicProjectsPage: true

# By default, disable the "Getting Started" popup, which is specific to the Arvados Playground.
EnableGettingStartedPopup: false

# Ask the Arvados API server to compress its response payloads.
APIResponseCompression: true

# Timeouts for API requests.
APIClientConnectTimeout: 2m
APIClientReceiveTimeout: 5m

# Maximum number of historic log records of a running job to fetch
# and display in the Log tab, while subscribing to web sockets.
RunningJobLogRecordsToFetch: 2000

# In systems with many shared projects, loading of dashboard and topnav
# can be slow due to collections indexing; use the following parameters
# to suppress these properties.
ShowRecentCollectionsOnDashboard: true
ShowUserNotifications: true

# Enable/disable "multi-site search" in top nav ("true"/"false"), or
# a link to the multi-site search page on a "home" Workbench site.
#   https://workbench.qr1hi.arvadosapi.com/collections/multisite

# Should Workbench allow management of local git repositories? Set to false if
# the Jobs API is disabled and there are no local git repositories.

SiteName: Arvados Workbench
ProfilingEnabled: false

# This is related to the obsolete Google OpenID 1.0 login,
# but some Workbench code still expects it to be set.
DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"

FileViewersConfigURL: ""

# Use experimental controller code (see https://dev.arvados.org/issues/14287).
EnableBetaController14287: false