active_record.auto_explain_threshold_in_seconds: 0.5
assets.compress: false
assets.debug: true
+ local_modified: "<%= '-modified' if `git status -s` != '' %>"
production:
force_ssl: true
uuid_prefix: zzzzz
secret_token: <%= rand(2**512).to_s(36) %>
blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
-
- # email address to which mail should be sent when the user creates profile for the first time
user_profile_notification_address: arvados@example.com
+ workbench_address: https://localhost:3001/
+ git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
+ git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
common:
+ # The prefix used for all database identifiers to identify the record as
+ # originating from this site. Must be exactly 5 alphanumeric characters
+ # (lowercase ASCII letters and digits).
uuid_prefix: <%= Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4] %>
+ # If not false, this is the hostname that will be used for root_url and
+ # advertised in the discovery document. By default, use the default Rails
+ # logic for deciding on a hostname.
+ host: false
+
+ # Base part of SSH git clone url given with repository resources. If
+ # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
+ # used. If false, SSH clone URLs are not advertised. Include a
+ # trailing ":" or "/" if needed: it will not be added automatically.
+ git_repo_ssh_base: true
+
+ # Base part of HTTPS git clone urls given with repository
+ # resources. This is expected to be an arv-git-httpd service which
+ # accepts API tokens as HTTP-auth passwords. If true, the default
+ # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
+ # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
+ # if needed: it will not be added automatically.
+ git_repo_https_base: true
+
+ # If this is not false, HTML requests at the API server's root URL
+ # are redirected to this location, and it is provided in the text of
+ # user activation notification email messages to remind users where
+ # to log in.
+ workbench_address: false
+
# Git repositories must be readable by api server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# crunch-job must be able to stat() it.
crunch_refresh_trigger: /tmp/crunch_refresh_trigger
- # Maximum number of log events that may be generated by a single job.
- crunch_limit_log_events_per_job: 65536
-
- # Maximum number of total bytes that may be logged by a single job.
- crunch_limit_log_event_bytes_per_job: 67108864
-
- # These two settings control how frequently log events are flushed
- # to the database. If a job generates two or more events within
- # crunch_log_seconds_between_events, the log data is not flushed
- # until crunch_log_bytes_per_event has been reached.
+ # These two settings control how frequently log events are flushed to the
+ # database. Log lines are buffered until either crunch_log_bytes_per_event
+ # has been reached or crunch_log_seconds_between_events has elapsed since
+ # the last flush.
crunch_log_bytes_per_event: 4096
crunch_log_seconds_between_events: 1
- # Path to /etc/dnsmasq.d, or false = do not update dnsmasq data.
- dnsmasq_conf_dir: false
+ # The sample period for throttling logs, in seconds.
+ crunch_log_throttle_period: 60
+
+ # Maximum number of bytes that a job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ crunch_log_throttle_bytes: 65536
+
+ # Maximum number of lines that a job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ crunch_log_throttle_lines: 1024
+
+ # Maximum bytes that may be logged by a single job. Log bytes that are
+ # silenced by throttling are not counted against this total.
+ crunch_limit_log_bytes_per_job: 67108864
+
+ # Path to dns server configuration directory
+ # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+ # files or touch restart.txt (see below).
+ dns_server_conf_dir: false
+
+ # Template file for the dns server host snippets. See
+ # unbound.template in this directory for an example. If false, do
+ # not write any config files.
+ dns_server_conf_template: false
- # Set to AMI id (ami-123456) to auto-start nodes. See app/models/node.rb
- compute_node_ami: false
- compute_node_ec2run_args: -g arvados-compute
- compute_node_spot_bid: 0.11
+ # String to write to {dns_server_conf_dir}/restart.txt (with a
+ # trailing newline) after updating local data. If false, do not
+ # open or write the restart.txt file.
+ dns_server_reload_command: false
+
+ # Command to run after each DNS update. Template variables will be
+ # substituted; see the "unbound" example below. If false, do not run
+ # a command.
+ dns_server_update_command: false
+
+ ## Example for unbound:
+ #dns_server_conf_dir: /etc/unbound/conf.d
+ #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
+ ## ...plus one of the following two methods of reloading:
+ #dns_server_reload_command: unbound-control reload
+ #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
compute_node_domain: false
compute_node_nameservers:
- 192.168.1.1
- compute_node_ec2_tag_enable: false
# The version below is suitable for AWS.
# To use it, copy it to your application.yml, uncomment, and change <%# to <%=
new_user_notification_recipients: [ ]
new_inactive_user_notification_recipients: [ ]
- # Visitors to the API server will be redirected to the workbench
- workbench_address: https://workbench.local:3001/
-
# The e-mail address of the user you would like to become marked as an admin
# user on their first login.
# In the default configuration, authentication happens through the Arvados SSO
# should be an address associated with a Google account.
auto_admin_user: false
+ # If auto_admin_first_user is set to true, the first user to log in when no
+ # other admin users exist will automatically become an admin user.
+ auto_admin_first_user: false
+
## Set Time.zone default to the specified zone and make Active
## Record auto-convert to this zone. Run "rake -D time" for a list
## of tasks for finding time zone names. Default is UTC.
arvados_theme: default
- # Default: do not advertise a websocket server.
- websocket_address: false
-
- # You can run the websocket server separately from the regular HTTP service
- # by setting "ARVADOS_WEBSOCKETS=ws-only" in the environment before running
- # the websocket server. When you do this, you need to set the following
- # configuration variable so that the primary server can give out the correct
- # address of the dedicated websocket server:
+ # The ARVADOS_WEBSOCKETS environment variable determines whether to
+ # serve http, websockets, or both.
+ #
+ # If ARVADOS_WEBSOCKETS="true", http and websockets are both served
+ # from the same process.
+ #
+ # If ARVADOS_WEBSOCKETS="ws-only", only websockets is served.
+ #
+ # If ARVADOS_WEBSOCKETS="false" or not set at all, only http is
+ # served. In this case, you should have a separate process serving
+ # websockets, and the address of that service should be given here
+ # as websocket_address.
+ #
+ # If websocket_address is false (which is the default), the
+ # discovery document will tell clients to use the current server as
+ # the websocket service, or (if the current server does not have
+ # websockets enabled) not to use websockets at all.
+ #
+ # Example: Clients will connect to the specified endpoint.
#websocket_address: wss://127.0.0.1:3333/websocket
+ # Default: Clients will connect to this server if it's running
+ # websockets; otherwise, no websocket service will be advertised.
+ websocket_address: false
# blob_signing_key is a string of alphanumeric characters used to
# generate permission signatures for Keep locators. It must be
# a site secret. It should be at least 50 characters.
blob_signing_key: ~
- # Amount of time (in seconds) for which a blob permission signature
- # remains valid. Default: 2 weeks (1209600 seconds)
- blob_signing_ttl: 1209600
+ # Lifetime (in seconds) of blob permission signatures generated by
+ # the API server. This determines how long a client can take (after
+ # retrieving a collection record) to retrieve the collection data
+ # from Keep. If the client needs more time than that (assuming the
+ # collection still has the same content and the relevant user/token
+ # still has permission) the client can retrieve the collection again
+ # to get fresh signatures.
+ #
+ # Datamanager considers an unreferenced block older than this to be
+ # eligible for garbage collection. Therefore, it should never be
+ # smaller than the corresponding value used by any local keepstore
+ # service (see keepstore -blob-signature-ttl flag). This rule
+ # prevents datamanager from trying to garbage-collect recently
+ # written blocks while clients are still holding valid signatures.
+ #
+ # The default is 2 weeks.
+ blob_signature_ttl: 1209600
# Allow clients to create collections by providing a manifest with
# unsigned data blob locators. IMPORTANT: This effectively disables
# should be at least 50 characters.
secret_token: ~
- # email address to which mail should be sent when the user creates profile for the first time
+ # Email address to notify whenever a user creates a profile for the
+ # first time.
user_profile_notification_address: false
default_openid_prefix: https://www.google.com/accounts/o8/id
auto_setup_new_users_with_vm_uuid: false
auto_setup_new_users_with_repository: false
auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+ # source_version: abbreviated git commit hash of the running source tree.
+ source_version: "<%= `git log -n 1 --format=%h`.strip %>"
+ local_modified: false
+
+ # Default lifetime for ephemeral collections: 2 weeks.
+ default_trash_lifetime: 1209600
+
+ # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the Single
+ # Sign-On (SSO) server. Should only be enabled during development when the SSO
+ # server is using a self-signed cert.
+ sso_insecure: false
+
+ # Default replication level for collections. This is used when a
+ # collection's replication_desired attribute is nil.
+ default_collection_replication: 2
+
+ # Maximum size (in bytes) allowed for a single API request. This
+ # limit is published in the discovery document for use by clients.
+ # Note: You must separately configure the upstream web server or
+ # proxy to actually enforce the desired maximum request size on the
+ # server side.
+ max_request_size: 134217728
+
+ # Limit the number of bytes read from the database during an index
+ # request (by retrieving and returning fewer rows than would
+ # normally be returned in a single response).
+ # Note 1: This setting never reduces the number of returned rows to
+ # zero, no matter how big the first data row is.
+ # Note 2: Currently, this only limits the
+ # arvados.v1.collections.list API (GET /arvados/v1/collections), and
+ # only takes the size of manifest_text into account. Other fields
+ # (e.g., "properties" hashes) are not counted against this limit
+ # when returning collections, and the limit is not applied at all
+ # for other data types.
+ max_index_database_read: 134217728
+
+ # When you run the db:delete_old_job_logs task, it will find jobs that
+ # have been finished for at least this many seconds, and delete their
+ # stderr logs from the logs table.
+ clean_job_log_rows_after: <%= 30.days %>
+
+ # The maximum number of compute nodes that can be in use simultaneously.
+ # If this limit is reduced, any existing nodes with slot number >= new limit
+ # will not be counted against the new limit. In other words, the new limit
+ # won't be strictly enforced until those nodes with higher slot numbers
+ # go down.
+ max_compute_nodes: 64
+
+ # Docker image to be used when none found in runtime_constraints of a job
+ default_docker_image_for_jobs: false