+# Maximum concurrent requests. When this limit is reached, new
+# requests will receive 503 responses. Note: this limit does not
+# include idle connections from clients using HTTP keepalive, so it
+# does not strictly limit the number of concurrent connections. If
+# omitted or zero, the default is 2 * MaxBuffers.
+MaxRequests: 0
+
+# Path to write PID file during startup. This file is kept open and
+# locked with LOCK_EX until keepstore exits, so "fuser -k pidfile" is
+# one way to shut down. Exit immediately if there is an error
+# opening, locking, or writing the PID file.
+PIDFile: ""
+
+# Maximum number of concurrent pull operations. Default is 1, i.e.,
+# pull lists are processed serially.
+PullWorkers: 0
+
+# Honor read requests only if a valid signature is provided. This
+# should be true, except for development use and when migrating from
+# a very old version.
+RequireSignatures: true
+
+# Local file containing the Arvados API token used by keep-balance
+# or data manager. Delete, trash, and index requests are honored
+# only for this token.
+SystemAuthTokenFile: ""
+
+# Path to server certificate file in X509 format. Enables TLS mode.
+#
+# Example: /var/lib/acme/live/keep0.example.com/fullchain
+TLSCertificateFile: ""
+
+# Path to server key file in X509 format. Enables TLS mode.
+#
+# The key pair is read from disk during startup, and whenever SIGHUP
+# is received.
+#
+# Example: /var/lib/acme/live/keep0.example.com/privkey
+TLSKeyFile: ""
+
+# How often to check for (and delete) trashed blocks whose
+# TrashLifetime has expired.
+TrashCheckInterval: 24h0m0s
+
+# Time duration after a block is trashed during which it can be
+# recovered using an /untrash request.
+TrashLifetime: 336h0m0s
+
+# Maximum number of concurrent trash operations. Default is 1, i.e.,
+# trash lists are processed serially.
+TrashWorkers: 1
+</pre>
+
+h3. Notes on storage management
+
+On its own, a keepstore server never deletes data. The "keep-balance":install-keep-balance.html service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash.
+
+When a block is newly written, it is protected from deletion for the duration in @BlobSignatureTTL@. During this time, it cannot be trashed.
+
+If keep-balance instructs keepstore to trash a block which is older than @BlobSignatureTTL@, and @EnableDelete@ is true, the block will be moved to "trash".
+
+h3. Configure storage volumes
+
+Available storage volume types include cloud object storage and POSIX filesystems.
+
+If you are using S3-compatible object storage (including Amazon S3, Google Cloud Storage, and Ceph RADOS), follow the setup instructions on the "S3 Object Storage":configure-s3-object-storage.html page instead and then "Run keepstore as a supervised service.":#keepstoreservice
+
+If you are using Azure Blob Storage, follow the setup instructions "Azure Blob Storage":configure-azure-blob-storage.html and then proceed to "Run keepstore as a supervised service.":#keepstoreservice
+
+To use a POSIX filesystem, including both local filesystems (ext4, xfs) and network file systems such as GPFS or Lustre, continue reading this section.
+
+h4. Setting up filesystem mounts
+
+Volumes are configured in the @Volumes@ section of the configuration
+file. You may provide multiple volumes for a single keepstore process
+to manage multiple disks. Keepstore distributes blocks among volumes
+in round-robin fashion.
+
+<pre>
+Volumes:
+- # The volume type. Directory indicates this is a filesystem directory.
+ Type: Directory
+
+ # The actual directory that will be used as the backing store.
+ Root: /mnt/local-disk
+
+ # How much replication is performed by the underlying filesystem.
+ # (for example, a network filesystem may provide its own replication).
+ # This is used to inform replication decisions at the Keep layer.
+ DirectoryReplication: 1
+
+ # If true, do not accept write or trash operations, only reads.
+ ReadOnly: false
+
+ # When true, read and write operations (for whole 64MiB blocks) on
+ # an individual volume will be queued and issued sequentially. When
+ # false, read and write operations will be issued concurrently as
+ # they come in.
+ #
+ # When using spinning disks where storage partitions map 1:1 to
+ # physical disks that are dedicated to Keepstore, enabling this may
+ # reduce contention and improve throughput by minimizing seeks.
+ #
+ # When using SSDs, RAID, or a parallel network filesystem, you probably
+ # don't want this.
+ Serialize: true
+
+ # Storage classes to associate with this volume. See "Configuring
+ # storage classes" in the "Admin" section of doc.arvados.org.
+ StorageClasses: null