X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/c550609485691d8107ae364bfc982569f81f1725..dd5deb94678d0a0054cf1c4d7dafe864c86094fd:/services/api/config/application.default.yml diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml index 0d936b713f..d46c3978a6 100644 --- a/services/api/config/application.default.yml +++ b/services/api/config/application.default.yml @@ -59,10 +59,19 @@ common: # logic for deciding on a hostname. host: false - # If not false, this is the hostname that will be used to generate fetch_url - # and push_url for git repositories. By default, this will be - # git.(uuid_prefix).arvadosapi.com - git_host: false + # Base part of SSH git clone url given with repository resources. If + # true, the default "git@git.(uuid_prefix).arvadosapi.com:" is + # used. If false, SSH clone URLs are not advertised. Include a + # trailing ":" or "/" if needed: it will not be added automatically. + git_repo_ssh_base: true + + # Base part of HTTPS git clone urls given with repository + # resources. This is expected to be an arv-git-httpd service which + # accepts API tokens as HTTP-auth passwords. If true, the default + # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false, + # HTTPS clone URLs are not advertised. Include a trailing ":" or "/" + # if needed: it will not be added automatically. + git_repo_https_base: true # If this is not false, HTML requests at the API server's root URL # are redirected to this location, and it is provided in the text of @@ -76,11 +85,6 @@ common: # {git_repositories_dir}/arvados/.git git_repositories_dir: /var/lib/arvados/git - # If an arv-git-httpd service is running, advertise it in the - # discovery document by adding its public URI base here. Example: - # https://git.xxxxx.arvadosapi.com - git_http_base: false - # This is a (bare) repository that stores commits used in jobs. 
When a job # runs, the source commits are first fetched into this repository, then this # repository is used to deploy to compute nodes. This should NOT be a @@ -216,9 +220,23 @@ common: # a site secret. It should be at least 50 characters. blob_signing_key: ~ - # Amount of time (in seconds) for which a blob permission signature - # remains valid. Default: 2 weeks (1209600 seconds) - blob_signing_ttl: 1209600 + # Lifetime (in seconds) of blob permission signatures generated by + # the API server. This determines how long a client can take (after + # retrieving a collection record) to retrieve the collection data + # from Keep. If the client needs more time than that (assuming the + # collection still has the same content and the relevant user/token + # still has permission) the client can retrieve the collection again + # to get fresh signatures. + # + # Datamanager considers an unreferenced block older than this to be + # eligible for garbage collection. Therefore, it should never be + # smaller than the corresponding value used by any local keepstore + # service (see keepstore -blob-signature-ttl flag). This rule + # prevents datamanager from trying to garbage-collect recently + # written blocks while clients are still holding valid signatures. + # + # The default is 2 weeks. + blob_signature_ttl: 1209600 # Allow clients to create collections by providing a manifest with # unsigned data blob locators. IMPORTANT: This effectively disables @@ -236,7 +254,8 @@ common: # should be at least 50 characters. 
secret_token: ~ - # email address to which mail should be sent when the user creates profile for the first time + # Email address to notify whenever a user creates a profile for the + # first time user_profile_notification_address: false default_openid_prefix: https://www.google.com/accounts/o8/id @@ -270,3 +289,28 @@ common: # Note you must separately configure the upstream web server or proxy to # actually enforce the desired maximum request size on the server side. max_request_size: 134217728 + + # Stop collecting records for an index request after we read this much + # data (in bytes) from large database columns. + # Currently only `GET /collections` respects this parameter, when the + # user requests an index that includes manifest_text. Once the API + # server collects records with a total manifest_text size at or above + # this amount, it returns those results immediately. + # Note this is a threshold, not a limit. Record collection stops + # *after* reading this much data. + max_index_database_read: 134217728 + + # When you run the db:delete_old_job_logs task, it will find jobs that + # have been finished for at least this many seconds, and delete their + # stderr logs from the logs table. + clean_job_log_rows_after: <%= 30.days %> + + # The maximum number of compute nodes that can be in use simultaneously. + # If this limit is reduced, any existing nodes with slot number >= new limit + # will not be counted against the new limit. In other words, the new limit + # won't be strictly enforced until those nodes with higher slot numbers + # go down. + max_compute_nodes: 64 + + # Docker image to be used when none is found in runtime_constraints of a job + default_docker_image_for_jobs: false