blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
user_profile_notification_address: arvados@example.com
workbench_address: https://localhost:3001/
+ git_repositories_dir: <%= Rails.root.join 'tmp', 'git', 'test' %>
+ git_internal_dir: <%= Rails.root.join 'tmp', 'internal.git' %>
common:
# The prefix used for all database identifiers to identify the record as
# originating from this site. Must be exactly 5 alphanumeric characters
# (lowercase ASCII letters, digits, and underscore).
uuid_prefix: ~

# If not false, this is the hostname that will be used for root_url and
# advertised in the discovery document. By default, use the default Rails
# logic for deciding on a hostname.
host: false
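
# For example (hostname here is illustrative), a site can pin the
# advertised hostname explicitly instead of relying on the Rails default:
#   host: api.example.com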
+ # Base part of SSH git clone URLs given with repository resources. If
+ # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is
+ # used. If false, SSH clone URLs are not advertised. Include a
+ # trailing ":" or "/" if needed: it will not be added automatically.
+ git_repo_ssh_base: true
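+ #
+ # For example (hostname is illustrative), setting
+ #   git_repo_ssh_base: "git@git.example.com:"
+ # advertises SSH clone URLs such as git@git.example.com:arvados.git.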
+
+ # Base part of HTTPS git clone URLs given with repository
+ # resources. This is expected to be an arv-git-httpd service which
+ # accepts API tokens as HTTP-auth passwords. If true, the default
+ # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
+ # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
+ # if needed: it will not be added automatically.
+ git_repo_https_base: true
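+ #
+ # For example (hostname, username, and repository name are
+ # illustrative), with
+ #   git_repo_https_base: "https://git.example.com/"
+ # a client can clone using an API token as the HTTP-auth password:
+ #   git clone https://anonymous:$ARVADOS_API_TOKEN@git.example.com/arvados.git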
+
# If this is not false, HTML requests at the API server's root URL
# are redirected to this location, and it is provided in the text of
# user activation notification email messages to remind them where
# to log in.
workbench_address: false

# Git repositories must be readable by api server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
# of the arvados tree in {git_repositories_dir}/arvados.git or
# {git_repositories_dir}/arvados/.git
git_repositories_dir: /var/lib/arvados/git
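
# For example (source URL is illustrative), a bare clone that
# satisfies the test suites:
#   git clone --bare https://github.com/curoverse/arvados.git \
#     /var/lib/arvados/git/arvados.git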
- # If an arv-git-httpd service is running, advertise it in the
- # discovery document by adding its public URI base here. Example:
- # https://git.xxxxx.arvadosapi.com
- git_http_base: false
-
# This is a (bare) repository that stores commits used in jobs. When a job
# runs, the source commits are first fetched into this repository, then this
# repository is used to deploy to compute nodes. This should NOT be a
# subdirectory of {git_repositories_dir}.
git_internal_dir: /var/lib/arvados/internal.git

# blob_signing_key is a string of alphanumeric characters used to
# generate permission signatures for Keep locators. It must be
# identical to the permission key given to Keep. IMPORTANT: This is
# a site secret. It should be at least 50 characters.
blob_signing_key: ~
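
# One way (among others) to generate a suitable 50-character key:
#   openssl rand -hex 25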
- # Amount of time (in seconds) for which a blob permission signature
- # remains valid. Default: 2 weeks (1209600 seconds)
- blob_signing_ttl: 1209600
+ # Lifetime (in seconds) of blob permission signatures generated by
+ # the API server. This determines how long a client can take (after
+ # retrieving a collection record) to retrieve the collection data
+ # from Keep. If the client needs more time than that (assuming the
+ # collection still has the same content and the relevant user/token
+ # still has permission) the client can retrieve the collection again
+ # to get fresh signatures.
+ #
+ # Datamanager considers an unreferenced block older than this to be
+ # eligible for garbage collection. Therefore, it should never be
+ # smaller than the corresponding value used by any local keepstore
+ # service (see keepstore -blob-signature-ttl flag). This rule
+ # prevents datamanager from trying to garbage-collect recently
+ # written blocks while clients are still holding valid signatures.
+ #
+ # The default is 2 weeks.
+ blob_signature_ttl: 1209600
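+ #
+ # For example, to shorten the lifetime to one day (86400 seconds):
+ #   blob_signature_ttl: 86400
+ # and give every local keepstore a matching (or smaller) value via
+ # its -blob-signature-ttl flag.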
# Allow clients to create collections by providing a manifest with
# unsigned data blob locators. IMPORTANT: This effectively disables
# access controls for data stored in Keep: a client who knows a hash
# can write a manifest that references the hash and use it to read
# the corresponding data. Do not enable this if your users expect to
# keep data private from one another.
permit_create_collection_with_unsigned_manifest: false

# secret_token is a string of alphanumeric characters used by Rails
# to sign session tokens. IMPORTANT: This is a site secret. It
# should be at least 50 characters.
secret_token: ~
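
# A fresh token can be generated with, for example, the standard
# Rails task:
#   bundle exec rake secret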
- # email address to which mail should be sent when the user creates profile for the first time
+ # Email address to notify whenever a user creates a profile for the
+ # first time
user_profile_notification_address: false
default_openid_prefix: https://www.google.com/accounts/o8/id
# Note you must separately configure the upstream web server or proxy to
# actually enforce the desired maximum request size on the server side.
max_request_size: 134217728
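
# For example, with nginx as the upstream proxy, the default
# 134217728 bytes (128 MiB) corresponds to:
#   client_max_body_size 128m;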
+
+ # Stop collecting records for an index request after we read this much
+ # data (in bytes) from large database columns.
+ # Currently only `GET /collections` respects this parameter, when the
+ # user requests an index that includes manifest_text. Once the API
+ # server collects records with a total manifest_text size at or above
+ # this amount, it returns those results immediately.
+ # Note this is a threshold, not a limit. Record collection stops
+ # *after* reading this much data.
+ max_index_database_read: 134217728
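+ #
+ # For example: the default 134217728 bytes is 128 MiB. An index of
+ # collections with manifest_text stops adding records once the
+ # manifest_text read so far totals at least 128 MiB, so a single
+ # oversized record can push the response past the threshold.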
+
+ # When you run the db:delete_old_job_logs task, it will find jobs that
+ # have been finished for at least this many seconds, and delete their
+ # stderr logs from the logs table.
+ clean_job_log_rows_after: <%= 30.days %>
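+ #
+ # For example, <%= 30.days %> renders as 2592000 (seconds). The task
+ # can then be run periodically, e.g. from cron:
+ #   RAILS_ENV=production bundle exec rake db:delete_old_job_logs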