X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/69b64f2fe8ccd2b92d1aa55b9fb39b03a342468e..22516d3663a3c11384824dad0e052dc0630f08f0:/lib/config/config.default.yml diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml index 537db67623..52e35d83ff 100644 --- a/lib/config/config.default.yml +++ b/lib/config/config.default.yml @@ -52,9 +52,9 @@ Clusters: DispatchCloud: InternalURLs: {SAMPLE: {}} ExternalURL: "-" - SSO: + DispatchLSF: InternalURLs: {SAMPLE: {}} - ExternalURL: "" + ExternalURL: "-" Keepproxy: InternalURLs: {SAMPLE: {}} ExternalURL: "" @@ -273,6 +273,7 @@ Clusters: AdminNotifierEmailFrom: arvados@example.com EmailSubjectPrefix: "[ARVADOS] " UserNotifierEmailFrom: arvados@example.com + UserNotifierEmailBcc: {} NewUserNotificationRecipients: {} NewInactiveUserNotificationRecipients: {} @@ -459,6 +460,13 @@ Clusters: # long-running balancing operation. BalanceTimeout: 6h + # Maximum number of replication_confirmed / + # storage_classes_confirmed updates to write to the database + # after a rebalancing run. When many updates are needed, this + # spreads them over a few runs rather than applying them all at + # once. + BalanceUpdateLimit: 100000 + # Default lifetime for ephemeral collections: 2 weeks. This must not # be less than BlobSigningTTL. DefaultTrashLifetime: 336h @@ -520,10 +528,10 @@ Clusters: # WebDAV would have to expose XSS vulnerabilities in order to # handle the redirect (see discussion on Services.WebDAV). # - # This setting has no effect in the recommended configuration, - # where the WebDAV is configured to have a separate domain for - # every collection; in this case XSS protection is provided by - # browsers' same-origin policy. + # This setting has no effect in the recommended configuration, where the + # WebDAV service is configured to have a separate domain for every + # collection and XSS protection is provided by browsers' same-origin + # policy. 
# # The default setting (false) is appropriate for a multi-user site. TrustAllContent: false @@ -545,20 +553,16 @@ Clusters: # Approximate memory limit (in bytes) for collection cache. MaxCollectionBytes: 100000000 - # Permission cache entries. - MaxPermissionEntries: 1000 - # UUID cache entries. MaxUUIDEntries: 1000 # Persistent sessions. MaxSessions: 100 - # Selectively set permissions for regular users and admins to be - # able to download or upload data files using the - # upload/download features for Workbench, WebDAV and S3 API - # support. - KeepWebPermission: + # Selectively set permissions for regular users and admins to + # download or upload data files using the upload/download + # features for Workbench, WebDAV and S3 API support. + WebDAVPermission: User: Download: true Upload: true @@ -577,8 +581,14 @@ Clusters: Download: true Upload: true + # Post upload / download events to the API server logs table, so + # that they can be included in the arv-user-activity report. + # You can disable this if you find that it is creating excess + # load on the API server and you don't need it. + WebDAVLogEvents: true + Login: - # One of the following mechanisms (SSO, Google, PAM, LDAP, or + # One of the following mechanisms (Google, PAM, LDAP, or # LoginCluster) should be enabled; see # https://doc.arvados.org/install/setup-login.html @@ -673,7 +683,7 @@ Clusters: AcceptAccessTokenScope: "" PAM: - # (Experimental) Use PAM to authenticate users. + # Use PAM to authenticate users. Enable: false # PAM service name. PAM will apply the policy in the @@ -759,16 +769,6 @@ Clusters: # originally supplied by the user will be used. UsernameAttribute: uid - SSO: - # Authenticate with a separate SSO server. 
(Deprecated) - Enable: false - - # ProviderAppID and ProviderAppSecret are generated during SSO - # setup; see - # https://doc.arvados.org/v2.0/install/install-sso.html#update-config - ProviderAppID: "" - ProviderAppSecret: "" - Test: # Authenticate users listed here in the config file. This # feature is intended to be used in test environments, and @@ -881,8 +881,8 @@ Clusters: UsePreemptibleInstances: false # PEM encoded SSH key (RSA, DSA, or ECDSA) used by the - # (experimental) cloud dispatcher for executing containers on - # worker VMs. Begins with "-----BEGIN RSA PRIVATE KEY-----\n" + # cloud dispatcher for executing containers on worker VMs. + # Begins with "-----BEGIN RSA PRIVATE KEY-----\n" # and ends with "\n-----END RSA PRIVATE KEY-----\n". DispatchPrivateKey: "" @@ -908,7 +908,7 @@ Clusters: # Minimum time between two attempts to run the same container MinRetryPeriod: 0s - # Container runtime: "docker" (default) or "singularity" (experimental) + # Container runtime: "docker" (default) or "singularity" RuntimeEngine: docker Logging: @@ -1020,6 +1020,33 @@ Clusters: # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.) AssignNodeHostname: "compute%d" + LSF: + # Arguments to bsub when submitting Arvados containers as LSF jobs. + # + # Template variables starting with % will be substituted as follows: + # + # %U uuid + # %C number of VCPUs + # %M memory in MB + # %T tmp in MB + # + # Use %% to express a literal %. The %%J in the default will be changed + # to %J, which is interpreted by bsub itself. + # + # Note that the default arguments cause LSF to write two files + # in /tmp on the compute node each time an Arvados container + # runs. Ensure you have something in place to delete old files + # from /tmp, or adjust the "-o" and "-e" arguments accordingly. 
+ BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]"] + + # Use sudo to switch to this user account when submitting LSF + # jobs. + # + # This account must exist on the hosts where LSF jobs run + # ("execution hosts"), as well as on the host where the + # Arvados LSF dispatcher runs ("submission host"). + BsubSudoUser: "crunch" + JobsAPI: # Enable the legacy 'jobs' API (crunch v1). This value must be a string. # @@ -1039,7 +1066,7 @@ Clusters: GitInternalDir: /var/lib/arvados/internal.git CloudVMs: - # Enable the cloud scheduler (experimental). + # Enable the cloud scheduler. Enable: false # Name/number of port where workers' SSH services listen. @@ -1051,7 +1078,7 @@ Clusters: # Shell command to execute on each worker to determine whether # the worker is booted and ready to run containers. It should # exit zero if the worker is ready. - BootProbeCommand: "docker ps -q" + BootProbeCommand: "systemctl is-system-running" # Minimum interval between consecutive probes to a single # worker. @@ -1073,13 +1100,25 @@ Clusters: # Maximum create/destroy-instance operations per second (0 = # unlimited). - MaxCloudOpsPerSecond: 0 + MaxCloudOpsPerSecond: 10 - # Maximum concurrent node creation operations (0 = unlimited). This is - # recommended by Azure in certain scenarios (see - # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image) - # and can be used with other cloud providers too, if desired. - MaxConcurrentInstanceCreateOps: 0 + # Maximum concurrent instance creation operations (0 = unlimited). + # + # MaxConcurrentInstanceCreateOps limits the number of instance creation + # requests that can be in flight at any one time, whereas + # MaxCloudOpsPerSecond limits the number of create/destroy operations + # that can be started per second. 
+ # + # Because the API for instance creation on Azure is synchronous, it is + # recommended to increase MaxConcurrentInstanceCreateOps when running + # on Azure. When using managed images, a value of 20 would be + # appropriate. When using Azure Shared Image Galleries, it could be set + # higher. For more information, see + # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image + # + # MaxConcurrentInstanceCreateOps can be increased for other cloud + # providers too, if desired. + MaxConcurrentInstanceCreateOps: 1 # Interval between cloud provider syncs/updates ("list all # instances"). @@ -1213,6 +1252,29 @@ Clusters: Price: 0.1 Preemptible: false + StorageClasses: + + # If you use multiple storage classes, specify them here, using + # the storage class name as the key (in place of "SAMPLE" in + # this sample entry). + # + # Further info/examples: + # https://doc.arvados.org/admin/storage-classes.html + SAMPLE: + + # Priority determines the order volumes should be searched + # when reading data, in cases where a keepstore server has + # access to multiple volumes with different storage classes. + Priority: 0 + + # Default determines which storage class(es) should be used + # when a user/client writes data or saves a new collection + # without specifying storage classes. + # + # If any StorageClasses are configured, at least one of them + # must have Default: true. + Default: true + Volumes: SAMPLE: # AccessViaHosts specifies which keepstore processes can read @@ -1236,7 +1298,9 @@ Clusters: ReadOnly: false Replication: 1 StorageClasses: - default: true + # If you have configured storage classes (see StorageClasses + # section above), add an entry here for each storage class + # satisfied by this volume. SAMPLE: true Driver: S3 DriverParameters: @@ -1254,6 +1318,7 @@ Clusters: ConnectTimeout: 1m ReadTimeout: 10m RaceWindow: 24h + PrefixLength: 0 # Use aws-s3-go (v2) instead of goamz UseAWSS3v2Driver: false