1 # Do not use this file for site configuration. Create application.yml
2 # instead (see application.yml.example).
4 # The order of precedence is:
5 # 1. config/environments/{RAILS_ENV}.rb (deprecated)
6 # 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
7 # 3. Section in application.yml called "common"
8 # 4. Section in application.default.yml corresponding to RAILS_ENV
9 # 5. Section in application.default.yml called "common"
common:
### Essential site configuration
16 # The prefix used for all database identifiers to identify the record as
17 # originating from this site. Must be exactly 5 alphanumeric characters
18 # (lowercase ASCII letters and digits).
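# Example (placeholder only; pick a unique 5-character prefix for your site):
#uuid_prefix: zzzzz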
21 # secret_token is a string of alphanumeric characters used by Rails
22 # to sign session tokens. IMPORTANT: This is a site secret. It
23 # should be at least 50 characters.
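# Example (do not copy this literally; generate your own long random string,
# e.g. with the ERB expression also used in the test section below):
#secret_token: <%= rand(2**512).to_s(36) %>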
26 # blob_signing_key is a string of alphanumeric characters used to
27 # generate permission signatures for Keep locators. It must be
28 # identical to the permission key given to Keep. IMPORTANT: This is
29 # a site secret. It should be at least 50 characters.
31 # Modifying blob_signing_key will invalidate all existing
32 # signatures, which can cause programs to fail (e.g., arv-put,
33 # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
34 # no such processes are running.
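# Example (placeholder; generate your own long random alphanumeric string and
# give the same value to your Keep servers):
#blob_signing_key: 0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdef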
# These settings are provided by your OAuth2 provider (e.g., the Arvados SSO
# server).
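# Example (placeholder values; the key names match the test section at the
# end of this file):
#sso_app_id: arvados-server
#sso_app_secret: your-sso-app-secret
#sso_provider_url: https://sso.example.com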
43 # If this is not false, HTML requests at the API server's root URL
44 # are redirected to this location, and it is provided in the text of
# user activation notification email messages to remind users where to log in.
47 workbench_address: false
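# Example (hostname is illustrative):
#workbench_address: https://workbench.zzzzz.arvadosapi.com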
49 # Client-facing URI for websocket service. Nginx should be
50 # configured to proxy this URI to arvados-ws; see
51 # http://doc.arvados.org/install/install-ws.html
53 # If websocket_address is false (which is the default), no websocket
# server will be advertised to clients. This configuration is not supported.
58 #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
59 websocket_address: false
61 # Maximum number of websocket connections allowed
62 websocket_max_connections: 500
# Maximum number of events that can be backlogged on a single websocket connection
65 websocket_max_notify_backlog: 1000
67 # Maximum number of subscriptions a single websocket connection can have
69 websocket_max_filters: 10
# Git repositories must be readable by the API server, or you won't be
72 # able to submit crunch jobs. To pass the test suites, put a clone
73 # of the arvados tree in {git_repositories_dir}/arvados.git or
74 # {git_repositories_dir}/arvados/.git
75 git_repositories_dir: /var/lib/arvados/git/repositories
77 # This is a (bare) repository that stores commits used in jobs. When a job
78 # runs, the source commits are first fetched into this repository, then this
79 # repository is used to deploy to compute nodes. This should NOT be a
# subdirectory of {git_repositories_dir}.
81 git_internal_dir: /var/lib/arvados/internal.git
83 # Default replication level for collections. This is used when a
84 # collection's replication_desired attribute is nil.
85 default_collection_replication: 2
89 ### Overriding default advertised hostnames/URLs
92 # If not false, this is the hostname that will be used for root_url and
# advertised in the discovery document. If false (the default), the standard
# Rails logic for deciding on a hostname is used.
host: false
97 # Base part of SSH git clone url given with repository resources. If
98 # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
99 # used. If false, SSH clone URLs are not advertised. Include a
100 # trailing ":" or "/" if needed: it will not be added automatically.
101 git_repo_ssh_base: true
103 # Base part of HTTPS git clone urls given with repository
104 # resources. This is expected to be an arv-git-httpd service which
105 # accepts API tokens as HTTP-auth passwords. If true, the default
106 # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
107 # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
108 # if needed: it will not be added automatically.
109 git_repo_https_base: true
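# Example overriding both clone URL bases with a hypothetical git host (note
# the explicit trailing ":" and "/"):
#git_repo_ssh_base: "git@git.zzzzz.example.com:"
#git_repo_https_base: "https://git.zzzzz.example.com/"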
### New user and email settings
116 # Config parameters to automatically setup new users.
117 # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
118 # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
119 auto_setup_new_users: false
120 auto_setup_new_users_with_vm_uuid: false
121 auto_setup_new_users_with_repository: false
122 auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
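# Example enabling automatic setup (the VM UUID is a placeholder for the
# shell VM new users should get a login on):
#auto_setup_new_users: true
#auto_setup_new_users_with_vm_uuid: zzzzz-xxxxx-xxxxxxxxxxxxxxx
#auto_setup_new_users_with_repository: true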
124 # When new_users_are_active is set to true, the user agreement check is skipped.
125 new_users_are_active: false
# The e-mail address of the user who should automatically be marked as an
# admin user on their first login.
129 # In the default configuration, authentication happens through the Arvados SSO
130 # server, which uses OAuth2 against Google's servers, so in that case this
131 # should be an address associated with a Google account.
132 auto_admin_user: false
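# Example (per the note above, this should be an address associated with a
# Google account when using the default SSO configuration):
#auto_admin_user: admin@example.com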
134 # If auto_admin_first_user is set to true, the first user to log in when no
135 # other admin users exist will automatically become an admin user.
136 auto_admin_first_user: false
# Email address to notify whenever a user creates a profile for the
# first time.
140 user_profile_notification_address: false
142 admin_notifier_email_from: arvados@example.com
143 email_subject_prefix: "[ARVADOS] "
144 user_notifier_email_from: arvados@example.com
145 new_user_notification_recipients: [ ]
146 new_inactive_user_notification_recipients: [ ]
150 ### Limits, timeouts and durations
153 # Lifetime (in seconds) of blob permission signatures generated by
154 # the API server. This determines how long a client can take (after
155 # retrieving a collection record) to retrieve the collection data
156 # from Keep. If the client needs more time than that (assuming the
157 # collection still has the same content and the relevant user/token
158 # still has permission) the client can retrieve the collection again
159 # to get fresh signatures.
161 # This must be exactly equal to the -blob-signature-ttl flag used by
162 # keepstore servers. Otherwise, reading data blocks and saving
163 # collections will fail with HTTP 403 permission errors.
165 # Modifying blob_signature_ttl invalidates existing signatures; see
166 # blob_signing_key note above.
# The default is 2 weeks (1209600 seconds).
169 blob_signature_ttl: 1209600
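# Example: shorten signatures to 1 day (86400 seconds); remember to set the
# matching -blob-signature-ttl value on every keepstore server.
#blob_signature_ttl: 86400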
171 # Default lifetime for ephemeral collections: 2 weeks. This must not
172 # be less than blob_signature_ttl.
173 default_trash_lifetime: 1209600
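# Example: keep trashed collections for 30 days (30 * 86400 = 2592000
# seconds), which stays above the default blob_signature_ttl:
#default_trash_lifetime: 2592000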
175 # Interval (seconds) between trash sweeps. During a trash sweep,
176 # collections are marked as trash if their trash_at time has
177 # arrived, and deleted if their delete_at time has arrived.
178 trash_sweep_interval: 60
180 # Maximum characters of (JSON-encoded) query parameters to include
181 # in each request log entry. When params exceed this size, they will
# be JSON-encoded, truncated to this size, and logged as
# params_truncated.
184 max_request_log_params_size: 2000
186 # Maximum size (in bytes) allowed for a single API request. This
187 # limit is published in the discovery document for use by clients.
188 # Note: You must separately configure the upstream web server or
# proxy to actually enforce the desired maximum request size on the
# server side.
191 max_request_size: 134217728
193 # Limit the number of bytes read from the database during an index
194 # request (by retrieving and returning fewer rows than would
195 # normally be returned in a single response).
196 # Note 1: This setting never reduces the number of returned rows to
197 # zero, no matter how big the first data row is.
198 # Note 2: Currently, this is only checked against a specific set of
199 # columns that tend to get large (collections.manifest_text,
200 # containers.mounts, workflows.definition). Other fields (e.g.,
201 # "properties" hashes) are not counted against this limit.
202 max_index_database_read: 134217728
# Maximum number of items to return when responding to API calls that
205 # can return partial result sets using limit and offset parameters
206 # (e.g., *.index, groups.contents). If a request specifies a "limit"
207 # parameter higher than this value, this value is used instead.
208 max_items_per_response: 1000
210 # When you run the db:delete_old_job_logs task, it will find jobs that
211 # have been finished for at least this many seconds, and delete their
212 # stderr logs from the logs table.
213 clean_job_log_rows_after: <%= 30.days %>
215 # When you run the db:delete_old_container_logs task, it will find
216 # containers that have been finished for at least this many seconds,
217 # and delete their stdout, stderr, arv-mount, crunch-run, and
218 # crunchstat logs from the logs table.
219 clean_container_log_rows_after: <%= 30.days %>
221 # Time to keep audit logs, in seconds. (An audit log is a row added
222 # to the "logs" table in the PostgreSQL database each time an
223 # Arvados object is created, modified, or deleted.)
225 # Currently, websocket event notifications rely on audit logs, so
# this should not be set lower than 600 (10 minutes).
227 max_audit_log_age: 1209600
229 # Maximum number of log rows to delete in a single SQL transaction.
231 # If max_audit_log_delete_batch is 0, log entries will never be
232 # deleted by Arvados. Cleanup can be done by an external process
233 # without affecting any Arvados system processes, as long as very
234 # recent (<5 minutes old) logs are not deleted.
236 # 100000 is a reasonable batch size for most sites.
237 max_audit_log_delete_batch: 0
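# Example: let Arvados clean up expired audit logs itself, deleting up to
# 100000 rows per transaction (the batch size suggested above):
#max_audit_log_delete_batch: 100000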
# The maximum number of compute nodes that can be in use simultaneously.
# If this limit is reduced, any existing nodes with slot number >= new limit
# will not be counted against the new limit. In other words, the new limit
# won't be strictly enforced until those nodes with higher slot numbers
# go down.
244 max_compute_nodes: 64
246 # These two settings control how frequently log events are flushed to the
247 # database. Log lines are buffered until either crunch_log_bytes_per_event
# has been reached or crunch_log_seconds_between_events has elapsed since
# the last flush.
250 crunch_log_bytes_per_event: 4096
251 crunch_log_seconds_between_events: 1
253 # The sample period for throttling logs, in seconds.
254 crunch_log_throttle_period: 60
# Maximum number of bytes that a job can log over crunch_log_throttle_period
257 # before being silenced until the end of the period.
258 crunch_log_throttle_bytes: 65536
# Maximum number of lines that a job can log over crunch_log_throttle_period
261 # before being silenced until the end of the period.
262 crunch_log_throttle_lines: 1024
264 # Maximum bytes that may be logged by a single job. Log bytes that are
265 # silenced by throttling are not counted against this total.
266 crunch_limit_log_bytes_per_job: 67108864
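# Example: allow each job to log up to 128 MiB (128 * 1024 * 1024 bytes):
#crunch_limit_log_bytes_per_job: 134217728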
# Limit how often a partial log line (e.g. a progress bar updated with "\r")
# is written to the log: at most once per this many seconds.
crunch_log_partial_line_throttle_period: 5
270 # Attributes to suppress in events and audit logs. Notably,
271 # specifying ["manifest_text"] here typically makes the database
272 # smaller and faster.
274 # Warning: Using any non-empty value here can have undesirable side
275 # effects for any client or component that relies on event logs.
276 # Use at your own risk.
277 unlogged_attributes: []
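# Example (from the note above; weigh the warning before enabling):
#unlogged_attributes: ["manifest_text"]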
279 # API methods to disable. Disabled methods are not listed in the
280 # discovery document, and respond 404 to all requests.
281 # Example: ["jobs.create", "pipeline_instances.create"]
282 disable_api_methods: []
285 ### Crunch, DNS & compute node management
# Docker image to be used when none is found in a job's runtime_constraints
289 default_docker_image_for_jobs: false
291 # List of supported Docker Registry image formats that compute nodes
292 # are able to use. `arv keep docker` will error out if a user tries
293 # to store an image with an unsupported format. Use an empty array
# to skip the compatibility check (and display a warning message to
# that effect).
297 # Example for sites running docker < 1.10: ["v1"]
298 # Example for sites running docker >= 1.10: ["v2"]
299 # Example for disabling check: []
300 docker_image_formats: ["v2"]
302 # :none or :slurm_immediate
303 crunch_job_wrapper: :none
# Unix username to run jobs as, or false = do not set uid when running jobs.
306 crunch_job_user: crunch
308 # The web service must be able to create/write this file, and
309 # crunch-job must be able to stat() it.
310 crunch_refresh_trigger: /tmp/crunch_refresh_trigger
312 # Path to dns server configuration directory
# (e.g. /etc/unbound/conf.d). If false, do not write any config
314 # files or touch restart.txt (see below).
315 dns_server_conf_dir: false
317 # Template file for the dns server host snippets. See
318 # unbound.template in this directory for an example. If false, do
319 # not write any config files.
320 dns_server_conf_template: false
322 # String to write to {dns_server_conf_dir}/restart.txt (with a
323 # trailing newline) after updating local data. If false, do not
324 # open or write the restart.txt file.
325 dns_server_reload_command: false
327 # Command to run after each DNS update. Template variables will be
# substituted; see the "unbound" example below. If false, do not run
# any update command.
330 dns_server_update_command: false
332 ## Example for unbound:
333 #dns_server_conf_dir: /etc/unbound/conf.d
334 #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
335 ## ...plus one of the following two methods of reloading:
336 #dns_server_reload_command: unbound-control reload
337 #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
339 compute_node_domain: false
compute_node_nameservers:
  - 192.168.1.1
343 # Hostname to assign to a compute node when it sends a "ping" and the
344 # hostname in its Node record is nil.
345 # During bootstrapping, the "ping" script is expected to notice the
# hostname given in the ping response, and update its unix hostname
# accordingly.
348 # If false, leave the hostname alone (this is appropriate if your compute
349 # nodes' hostnames are already assigned by some other mechanism).
351 # One way or another, the hostnames of your node records should agree
352 # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
354 # Example for compute0000, compute0001, ....:
355 # assign_node_hostname: compute%<slot_number>04d
356 # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
357 assign_node_hostname: compute%<slot_number>d
361 ### Job and container reuse logic.
364 # Include details about job reuse decisions in the server log. This
365 # causes additional database queries to run, so it should not be
366 # enabled unless you expect to examine the resulting logs for
367 # troubleshooting purposes.
368 log_reuse_decisions: false
370 # Control job reuse behavior when two completed jobs match the
371 # search criteria and have different outputs.
373 # If true, in case of a conflict, reuse the earliest job (this is
374 # similar to container reuse behavior).
376 # If false, in case of a conflict, do not reuse any completed job,
377 # but do reuse an already-running job if available (this is the
378 # original job reuse behavior, and is still the default).
379 reuse_job_if_outputs_differ: false
382 ### Remaining assorted configuration options.
385 arvados_theme: default
387 # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the Single Sign
388 # On (sso) server. Should only be enabled during development when the SSO
# server is using a self-signed cert.
sso_insecure: false
392 ## Set Time.zone default to the specified zone and make Active
393 ## Record auto-convert to this zone. Run "rake -D time" for a list
394 ## of tasks for finding time zone names. Default is UTC.
395 #time_zone: Central Time (US & Canada)
397 ## Default encoding used in templates for Ruby 1.9.
# Enable the asset pipeline
assets.enabled: true
# Version of your assets; change this if you want to expire all your assets
assets.version: "1.0"
406 # Allow clients to create collections by providing a manifest with
407 # unsigned data blob locators. IMPORTANT: This effectively disables
408 # access controls for data stored in Keep: a client who knows a hash
409 # can write a manifest that references the hash, pass it to
410 # collections.create (which will create a permission link), use
411 # collections.get to obtain a signature for that data locator, and
412 # use that signed locator to retrieve the data from Keep. Therefore,
# do not turn this on if your users expect to keep data private from
# one another.
415 permit_create_collection_with_unsigned_manifest: false
417 default_openid_prefix: https://www.google.com/accounts/o8/id
419 # Override the automatic version string. With the default value of
420 # false, the version string is read from git-commit.version in
# Rails.root (included in vendor packages) or determined by invoking
# "git log".
423 source_version: false
425 # Enable asynchronous permission graph rebuild. Must run
426 # script/permission-updater.rb as a separate process. When the permission
427 # cache is invalidated, the background process will update the permission
428 # graph cache. This feature is experimental!
429 async_permissions_update: false
431 # Default value for container_count_max for container requests. This is the
432 # number of times Arvados will create a new container to satisfy a container
# request. If a container is cancelled, Arvados will retry with a new
# container as long as container_count < container_count_max on any
# container request associated with the cancelled container.
436 container_count_max: 3
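# Example: disable automatic retries so each container request gets at most
# one container attempt:
#container_count_max: 1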
438 # Default value for keep_cache_ram of a container's runtime_constraints.
439 container_default_keep_cache_ram: 268435456
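# Example: raise the default Keep cache to 512 MiB (512 * 1024 * 1024 bytes):
#container_default_keep_cache_ram: 536870912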
development:
consider_all_requests_local: true
446 action_controller.perform_caching: false
447 action_mailer.raise_delivery_errors: false
448 action_mailer.perform_deliveries: false
449 active_support.deprecation: :log
450 action_dispatch.best_standards_support: :builtin
451 active_record.auto_explain_threshold_in_seconds: 0.5
452 assets.compress: false
production:
consider_all_requests_local: false
459 action_controller.perform_caching: true
460 serve_static_files: false
461 assets.compress: true
462 assets.compile: false
test:
serve_static_files: true
469 static_cache_control: public, max-age=3600
471 consider_all_requests_local: true
472 action_controller.perform_caching: false
473 action_dispatch.show_exceptions: false
474 action_controller.allow_forgery_protection: false
475 action_mailer.delivery_method: :test
476 active_support.deprecation: :stderr
478 sso_app_id: arvados-server
479 sso_app_secret: <%= rand(2**512).to_s(36) %>
480 sso_provider_url: http://localhost:3002
481 secret_token: <%= rand(2**512).to_s(36) %>
482 blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
483 user_profile_notification_address: arvados@example.com
484 workbench_address: https://localhost:3001/
485 git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
486 git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
487 websocket_address: "wss://0.0.0.0:<%= ENV['ARVADOS_TEST_WSS_PORT'] %>/websocket"
488 trash_sweep_interval: -1
489 docker_image_formats: ["v1"]