X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/89f541d2b534b9cad4ee668f702f1270bf056171..53b2e5895715c73febffb563ebc89153339e02ab:/lib/config/generated_config.go

diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index b15bf7eebc..4742c64058 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -58,6 +58,9 @@ Clusters:
       DispatchCloud:
         InternalURLs: {SAMPLE: {}}
         ExternalURL: "-"
+      DispatchLSF:
+        InternalURLs: {SAMPLE: {}}
+        ExternalURL: "-"
       Keepproxy:
         InternalURLs: {SAMPLE: {}}
         ExternalURL: ""
@@ -276,6 +279,7 @@ Clusters:
       AdminNotifierEmailFrom: arvados@example.com
       EmailSubjectPrefix: "[ARVADOS] "
       UserNotifierEmailFrom: arvados@example.com
+      UserNotifierEmailBcc: {}
       NewUserNotificationRecipients: {}
       NewInactiveUserNotificationRecipients: {}

@@ -462,6 +466,13 @@ Clusters:
       # long-running balancing operation.
       BalanceTimeout: 6h

+      # Maximum number of replication_confirmed /
+      # storage_classes_confirmed updates to write to the database
+      # after a rebalancing run. When many updates are needed, this
+      # spreads them over a few runs rather than applying them all at
+      # once.
+      BalanceUpdateLimit: 100000
+
       # Default lifetime for ephemeral collections: 2 weeks. This must not
       # be less than BlobSigningTTL.
       DefaultTrashLifetime: 336h
@@ -523,10 +534,10 @@ Clusters:
       # WebDAV would have to expose XSS vulnerabilities in order to
       # handle the redirect (see discussion on Services.WebDAV).
       #
-      # This setting has no effect in the recommended configuration,
-      # where the WebDAV is configured to have a separate domain for
-      # every collection; in this case XSS protection is provided by
-      # browsers' same-origin policy.
+      # This setting has no effect in the recommended configuration, where the
+      # WebDAV service is configured to have a separate domain for every
+      # collection and XSS protection is provided by browsers' same-origin
+      # policy.
       #
       # The default setting (false) is appropriate for a multi-user site.
       TrustAllContent: false
@@ -548,9 +559,6 @@ Clusters:
         # Approximate memory limit (in bytes) for collection cache.
         MaxCollectionBytes: 100000000

-        # Permission cache entries.
-        MaxPermissionEntries: 1000
-
         # UUID cache entries.
         MaxUUIDEntries: 1000

@@ -681,7 +689,7 @@ Clusters:
       AcceptAccessTokenScope: ""

       PAM:
-        # (Experimental) Use PAM to authenticate users.
+        # Use PAM to authenticate users.
         Enable: false

         # PAM service name. PAM will apply the policy in the
@@ -879,8 +887,8 @@ Clusters:
       UsePreemptibleInstances: false

       # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the
-      # (experimental) cloud dispatcher for executing containers on
-      # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
+      # cloud dispatcher for executing containers on worker VMs.
+      # Begins with "-----BEGIN RSA PRIVATE KEY-----\n"
       # and ends with "\n-----END RSA PRIVATE KEY-----\n".
       DispatchPrivateKey: ""

@@ -906,9 +914,45 @@ Clusters:
       # Minimum time between two attempts to run the same container
       MinRetryPeriod: 0s

-      # Container runtime: "docker" (default) or "singularity" (experimental)
+      # Container runtime: "docker" (default) or "singularity"
       RuntimeEngine: docker

+      # When running a container, run a dedicated keepstore process,
+      # using the specified number of 64 MiB memory buffers per
+      # allocated CPU core (VCPUs in the container's runtime
+      # constraints). The dedicated keepstore handles I/O for
+      # collections mounted in the container, as well as saving
+      # container logs.
+      #
+      # A zero value disables this feature.
+      #
+      # In order for this feature to be activated, no volume may use
+      # AccessViaHosts, and each volume must have Replication higher
+      # than Collections.DefaultReplication. If these requirements are
+      # not satisfied, the feature is disabled automatically
+      # regardless of the value given here.
+      #
+      # Note that when this configuration is enabled, the entire
+      # cluster configuration file, including the system root token,
+      # is copied to the worker node and held in memory for the
+      # duration of the container.
+      LocalKeepBlobBuffersPerVCPU: 1
+
+      # When running a dedicated keepstore process for a container
+      # (see LocalKeepBlobBuffersPerVCPU), write keepstore log
+      # messages to keepstore.txt in the container's log collection.
+      #
+      # These log messages can reveal some volume configuration
+      # details, error messages from the cloud storage provider, etc.,
+      # which are not otherwise visible to users.
+      #
+      # Accepted values:
+      #   * "none" -- no keepstore.txt file
+      #   * "all" -- all logs, including request and response lines
+      #   * "errors" -- all logs except "response" logs with 2xx
+      #     response codes and "request" logs
+      LocalKeepLogsToContainerLog: none
+
       Logging:
         # When you run the db:delete_old_container_logs task, it will find
         # containers that have been finished for at least this many seconds,
@@ -1018,6 +1062,24 @@ Clusters:
           # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
           AssignNodeHostname: "compute%d"

+      LSF:
+        # Additional arguments to bsub when submitting Arvados
+        # containers as LSF jobs.
+        #
+        # Note that the default arguments cause LSF to write two files
+        # in /tmp on the compute node each time an Arvados container
+        # runs. Ensure you have something in place to delete old files
+        # from /tmp, or adjust these arguments accordingly.
+        BsubArgumentsList: ["-o", "/tmp/crunch-run.%J.out", "-e", "/tmp/crunch-run.%J.err"]
+
+        # Use sudo to switch to this user account when submitting LSF
+        # jobs.
+        #
+        # This account must exist on the hosts where LSF jobs run
+        # ("execution hosts"), as well as on the host where the
+        # Arvados LSF dispatcher runs ("submission host").
+        BsubSudoUser: "crunch"
+
       JobsAPI:
         # Enable the legacy 'jobs' API (crunch v1). This value must be a string.
         #
@@ -1037,7 +1099,7 @@ Clusters:
         GitInternalDir: /var/lib/arvados/internal.git

       CloudVMs:
-        # Enable the cloud scheduler (experimental).
+        # Enable the cloud scheduler.
         Enable: false

         # Name/number of port where workers' SSH services listen.
@@ -1049,7 +1111,7 @@ Clusters:
         # Shell command to execute on each worker to determine whether
         # the worker is booted and ready to run containers. It should
         # exit zero if the worker is ready.
-        BootProbeCommand: "docker ps -q"
+        BootProbeCommand: "systemctl is-system-running"

         # Minimum interval between consecutive probes to a single
         # worker.
@@ -1071,13 +1133,25 @@ Clusters:
         # Maximum create/destroy-instance operations per second (0 =
         # unlimited).
-        MaxCloudOpsPerSecond: 0
+        MaxCloudOpsPerSecond: 10

-        # Maximum concurrent node creation operations (0 = unlimited). This is
-        # recommended by Azure in certain scenarios (see
-        # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image)
-        # and can be used with other cloud providers too, if desired.
-        MaxConcurrentInstanceCreateOps: 0
+        # Maximum concurrent instance creation operations (0 = unlimited).
+        #
+        # MaxConcurrentInstanceCreateOps limits the number of instance creation
+        # requests that can be in flight at any one time, whereas
+        # MaxCloudOpsPerSecond limits the number of create/destroy operations
+        # that can be started per second.
+        #
+        # Because the API for instance creation on Azure is synchronous, it is
+        # recommended to increase MaxConcurrentInstanceCreateOps when running
+        # on Azure. When using managed images, a value of 20 would be
+        # appropriate. When using Azure Shared Image Galleries, it could be set
+        # higher. For more information, see
+        # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image
+        #
+        # MaxConcurrentInstanceCreateOps can be increased for other cloud
+        # providers too, if desired.
+        MaxConcurrentInstanceCreateOps: 1

         # Interval between cloud provider syncs/updates ("list all
         # instances").
@@ -1211,6 +1285,29 @@ Clusters:
         Price: 0.1
         Preemptible: false

+    StorageClasses:
+
+      # If you use multiple storage classes, specify them here, using
+      # the storage class name as the key (in place of "SAMPLE" in
+      # this sample entry).
+      #
+      # Further info/examples:
+      # https://doc.arvados.org/admin/storage-classes.html
+      SAMPLE:
+
+        # Priority determines the order volumes should be searched
+        # when reading data, in cases where a keepstore server has
+        # access to multiple volumes with different storage classes.
+        Priority: 0
+
+        # Default determines which storage class(es) should be used
+        # when a user/client writes data or saves a new collection
+        # without specifying storage classes.
+        #
+        # If any StorageClasses are configured, at least one of them
+        # must have Default: true.
+        Default: true
+
     Volumes:
       SAMPLE:
         # AccessViaHosts specifies which keepstore processes can read
@@ -1234,7 +1331,9 @@ Clusters:
         ReadOnly: false
         Replication: 1
         StorageClasses:
-          default: true
+          # If you have configured storage classes (see StorageClasses
+          # section above), add an entry here for each storage class
+          # satisfied by this volume.
           SAMPLE: true
         Driver: S3
         DriverParameters:
@@ -1252,6 +1351,7 @@ Clusters:
           ConnectTimeout: 1m
           ReadTimeout: 10m
           RaceWindow: 24h
+          PrefixLength: 0
           # Use aws-s3-go (v2) instead of goamz
           UseAWSS3v2Driver: false
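As a rough sketch of how the additions above fit together: enabling the LSF dispatcher involves both the new Services.DispatchLSF entry and the new Containers.LSF section. In the snippet below, the cluster ID "xxxxx" and the localhost URL/port for the dispatcher's InternalURLs are illustrative placeholders, not values taken from this diff; only BsubArgumentsList and BsubSudoUser reflect the defaults shown above.

Clusters:
  xxxxx:
    Services:
      DispatchLSF:
        InternalURLs:
          "http://localhost:9007": {}   # placeholder address for arvados-dispatch-lsf
        ExternalURL: "-"
    Containers:
      LSF:
        # Default bsub output/error files land in /tmp on each execution
        # host; arrange for old files to be cleaned up, or change these.
        BsubArgumentsList: ["-o", "/tmp/crunch-run.%J.out", "-e", "/tmp/crunch-run.%J.err"]
        # Account used (via sudo) to submit jobs; must exist on both the
        # submission host and the execution hosts.
        BsubSudoUser: "crunch"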
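Similarly, the new top-level StorageClasses section is meant to be used together with the per-volume StorageClasses entries: each volume lists the classes it satisfies, and at least one configured class must have Default: true. A minimal sketch, where the class names "default" and "archival" and the volume ID are made-up examples:

Clusters:
  xxxxx:
    StorageClasses:
      default:
        Priority: 0
        Default: true        # at least one configured class must be the default
      archival:
        Priority: 10         # relative search order when a keepstore has volumes in several classes
        Default: false
    Volumes:
      xxxxx-nyw5e-000000000000000:
        Replication: 2
        StorageClasses:
          # one entry per storage class satisfied by this volume
          default: true
          archival: true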