1 # Do not use this file for site configuration. Create application.yml
2 # instead (see application.yml.example).
8 consider_all_requests_local: true
9 action_controller.perform_caching: false
10 action_mailer.raise_delivery_errors: false
11 action_mailer.perform_deliveries: false
12 active_support.deprecation: :log
13 action_dispatch.best_standards_support: :builtin
14 active_record.mass_assignment_sanitizer: :strict
15 active_record.auto_explain_threshold_in_seconds: 0.5
16 assets.compress: false
18 local_modified: "<%= '-modified' if `git status -s` != '' %>"
23 consider_all_requests_local: false
24 action_controller.perform_caching: true
25 serve_static_assets: false
33 serve_static_assets: true
34 static_cache_control: "public, max-age=3600"  # Cache-Control header value; quoted because the plain scalar contains ',' and '='
36 consider_all_requests_local: true
37 action_controller.perform_caching: false
38 action_dispatch.show_exceptions: false
39 action_controller.allow_forgery_protection: false
40 action_mailer.delivery_method: :test
41 active_support.deprecation: :stderr
42 active_record.mass_assignment_sanitizer: :strict
44 secret_token: "<%= rand(2**512).to_s(36) %>"  # quoted so the rendered token is always read as a string (matches quoting of other templated values in this file)
45 blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
46 user_profile_notification_address: arvados@example.com
47 workbench_address: https://localhost:3001/
48 git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
49 git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
52 # The prefix used for all database identifiers to identify the record as
53 # originating from this site. Must be exactly 5 alphanumeric characters
54 # (lowercase ASCII letters and digits).
55 uuid_prefix: "<%= Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4] %>"  # quoted: an all-digit prefix would otherwise parse as a YAML integer
57 # If not false, this is the hostname that will be used for root_url and
58 # advertised in the discovery document. By default, use the default Rails
59 # logic for deciding on a hostname.
62 # Base part of SSH git clone url given with repository resources. If
63 # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
64 # used. If false, SSH clone URLs are not advertised. Include a
65 # trailing ":" or "/" if needed: it will not be added automatically.
66 git_repo_ssh_base: true
68 # Base part of HTTPS git clone urls given with repository
69 # resources. This is expected to be an arv-git-httpd service which
70 # accepts API tokens as HTTP-auth passwords. If true, the default
71 # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
72 # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
73 # if needed: it will not be added automatically.
74 git_repo_https_base: true
76 # If this is not false, HTML requests at the API server's root URL
77 # are redirected to this location, and it is provided in the text of
78 # user activation notification email messages to remind them where
80 workbench_address: false
82 # Git repositories must be readable by api server, or you won't be
83 # able to submit crunch jobs. To pass the test suites, put a clone
84 # of the arvados tree in {git_repositories_dir}/arvados.git or
85 # {git_repositories_dir}/arvados/.git
86 git_repositories_dir: /var/lib/arvados/git
88 # This is a (bare) repository that stores commits used in jobs. When a job
89 # runs, the source commits are first fetched into this repository, then this
90 # repository is used to deploy to compute nodes. This should NOT be a
91 # subdirectory of {git_repositories_dir}.
92 git_internal_dir: /var/lib/arvados/internal.git
94 # :none or :slurm_immediate
95 crunch_job_wrapper: :none
97 # username, or false = do not set uid when running jobs.
98 crunch_job_user: crunch
100 # The web service must be able to create/write this file, and
101 # crunch-job must be able to stat() it.
102 crunch_refresh_trigger: /tmp/crunch_refresh_trigger
104 # These two settings control how frequently log events are flushed to the
105 # database. Log lines are buffered until either crunch_log_bytes_per_event
106 # has been reached or crunch_log_seconds_between_events has elapsed since
108 crunch_log_bytes_per_event: 4096
109 crunch_log_seconds_between_events: 1
111 # The sample period for throttling logs, in seconds.
112 crunch_log_throttle_period: 60
114 # Maximum number of bytes that job can log over crunch_log_throttle_period
115 # before being silenced until the end of the period.
116 crunch_log_throttle_bytes: 65536
118 # Maximum number of lines that job can log over crunch_log_throttle_period
119 # before being silenced until the end of the period.
120 crunch_log_throttle_lines: 1024
122 # Maximum bytes that may be logged by a single job. Log bytes that are
123 # silenced by throttling are not counted against this total.
124 crunch_limit_log_bytes_per_job: 67108864
126 # Path to dns server configuration directory (e.g. /etc/unbound.d/conf.d),
127 # or false = do not update dns server data.
128 dns_server_conf_dir: false
130 # Template for the dns server host snippets. See unbound.template in this directory for
131 # an example. Set to false to disable.
132 dns_server_conf_template: false
134 # Dns server reload command, or false = do not reload dns server after data change
135 dns_server_reload_command: false
137 # Example for unbound
138 # dns_server_conf_dir: /etc/unbound/conf.d
139 # dns_server_conf_template: /path/to/your/api/server/config/unbound.template
140 # dns_server_reload_command: /etc/init.d/unbound reload
142 compute_node_domain: false
143 compute_node_nameservers:
146 # The version below is suitable for AWS.
147 # To use it, copy it to your application.yml, uncomment, and change <%# to <%=
148 # compute_node_nameservers: <%#
150 # ['local', 'public'].collect do |iface|
151 # Net::HTTP.get(URI("http://169.254.169.254/latest/meta-data/#{iface}-ipv4")).match(/^[\d\.]+$/)[0]
152 # end << '172.16.0.23'
157 # When new_users_are_active is set to true, the user agreement check is skipped.
158 new_users_are_active: false
160 admin_notifier_email_from: arvados@example.com
161 email_subject_prefix: "[ARVADOS] "
162 user_notifier_email_from: arvados@example.com
163 new_user_notification_recipients: [ ]
164 new_inactive_user_notification_recipients: [ ]
166 # The e-mail address of the user you would like to become marked as an admin
167 # user on their first login.
168 # In the default configuration, authentication happens through the Arvados SSO
169 # server, which uses openid against Google's servers, so in that case this
170 # should be an address associated with a Google account.
171 auto_admin_user: false
173 # If auto_admin_first_user is set to true, the first user to log in when no
174 # other admin users exist will automatically become an admin user.
175 auto_admin_first_user: false
177 ## Set Time.zone default to the specified zone and make Active
178 ## Record auto-convert to this zone. Run "rake -D time" for a list
179 ## of tasks for finding time zone names. Default is UTC.
180 # time_zone: Central Time (US & Canada)
182 ## Default encoding used in templates for Ruby 1.9.
185 # Enable the asset pipeline
188 # Version of your assets, change this if you want to expire all your assets
189 assets.version: "1.0"
191 arvados_theme: default
193 # The ARVADOS_WEBSOCKETS environment variable determines whether to
194 # serve http, websockets, or both.
196 # If ARVADOS_WEBSOCKETS="true", http and websockets are both served
197 # from the same process.
199 # If ARVADOS_WEBSOCKETS="ws-only", only websockets is served.
201 # If ARVADOS_WEBSOCKETS="false" or not set at all, only http is
202 # served. In this case, you should have a separate process serving
203 # websockets, and the address of that service should be given here
204 # as websocket_address.
206 # If websocket_address is false (which is the default), the
207 # discovery document will tell clients to use the current server as
208 # the websocket service, or (if the current server does not have
209 # websockets enabled) not to use websockets at all.
211 # Example: Clients will connect to the specified endpoint.
212 # websocket_address: wss://127.0.0.1:3333/websocket
213 # Default: Clients will connect to this server if it's running
214 # websockets, otherwise none at all.
215 websocket_address: false
217 # blob_signing_key is a string of alphanumeric characters used to
218 # generate permission signatures for Keep locators. It must be
219 # identical to the permission key given to Keep. IMPORTANT: This is
220 # a site secret. It should be at least 50 characters.
223 # Lifetime (in seconds) of blob permission signatures generated by
224 # the API server. This determines how long a client can take (after
225 # retrieving a collection record) to retrieve the collection data
226 # from Keep. If the client needs more time than that (assuming the
227 # collection still has the same content and the relevant user/token
228 # still has permission) the client can retrieve the collection again
229 # to get fresh signatures.
231 # Datamanager considers an unreferenced block older than this to be
232 # eligible for garbage collection. Therefore, it should never be
233 # smaller than the corresponding value used by any local keepstore
234 # service (see keepstore -blob-signing-ttl flag). This rule prevents
235 # datamanager from trying to garbage-collect recently written blocks
236 # while clients are still holding valid signatures.
238 # The default is 2 weeks.
239 blob_signature_ttl: 1209600
241 # Allow clients to create collections by providing a manifest with
242 # unsigned data blob locators. IMPORTANT: This effectively disables
243 # access controls for data stored in Keep: a client who knows a hash
244 # can write a manifest that references the hash, pass it to
245 # collections.create (which will create a permission link), use
246 # collections.get to obtain a signature for that data locator, and
247 # use that signed locator to retrieve the data from Keep. Therefore,
248 # do not turn this on if your users expect to keep data private from
250 permit_create_collection_with_unsigned_manifest: false
252 # secret_token is a string of alphanumeric characters used by Rails
253 # to sign session tokens. IMPORTANT: This is a site secret. It
254 # should be at least 50 characters.
257 # Email address to notify whenever a user creates a profile for the
259 user_profile_notification_address: false
261 default_openid_prefix: https://www.google.com/accounts/o8/id
263 # Config parameters to automatically setup new users.
264 # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
265 # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
266 auto_setup_new_users: false
267 auto_setup_new_users_with_vm_uuid: false
268 auto_setup_new_users_with_repository: false
269 auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
272 source_version: "<%= `git log -n 1 --format=%h`.strip %>"
273 local_modified: false
275 # Default lifetime for ephemeral collections: 2 weeks.
276 default_trash_lifetime: 1209600
278 # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the Single Sign
279 # On (sso) server. Should only be enabled during development when the SSO
280 # server is using a self-signed cert.
283 # Default replication level for collections. This is used when a
284 # collection's replication_desired attribute is nil.
285 default_collection_replication: 2
287 # Maximum size (in bytes) allowed for a single API request that will be
288 # published in the discovery document for use by clients.
289 # Note you must separately configure the upstream web server or proxy to
290 # actually enforce the desired maximum request size on the server side.
291 max_request_size: 134217728
293 # Stop collecting records for an index request after we read this much
294 # data (in bytes) from large database columns.
295 # Currently only `GET /collections` respects this parameter, when the
296 # user requests an index that includes manifest_text. Once the API
297 # server collects records with a total manifest_text size at or above
298 # this amount, it returns those results immediately.
299 # Note this is a threshold, not a limit. Record collection stops
300 # *after* reading this much data.
301 max_index_database_read: 134217728
303 # When you run the db:delete_old_job_logs task, it will find jobs that
304 # have been finished for at least this many seconds, and delete their
305 # stderr logs from the logs table.
306 clean_job_log_rows_after: <%= 30.days %>