21717: Fix incorrect header.
services/keepstore/s3_volume.go
index 79a680d58a3efebab11467ca2f3a474d2e0d0feb..2e2e97a974efa2ddbb7b5e60f67160da85181980 100644
-package main
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
 
 import (
+       "bytes"
+       "context"
        "encoding/base64"
        "encoding/hex"
-       "flag"
+       "encoding/json"
+       "errors"
        "fmt"
        "io"
-       "log"
-       "net/http"
        "os"
        "regexp"
+       "strings"
+       "sync"
+       "sync/atomic"
        "time"
 
-       "github.com/AdRoll/goamz/aws"
-       "github.com/AdRoll/goamz/s3"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "github.com/aws/aws-sdk-go-v2/aws"
+       "github.com/aws/aws-sdk-go-v2/aws/awserr"
+       "github.com/aws/aws-sdk-go-v2/aws/defaults"
+       "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+       "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
+       "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+       "github.com/aws/aws-sdk-go-v2/service/s3"
+       "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/sirupsen/logrus"
 )
 
-var (
-       ErrS3DeleteNotAvailable = fmt.Errorf("delete without -s3-unsafe-delete is not implemented")
-
-       s3AccessKeyFile string
-       s3SecretKeyFile string
-       s3RegionName    string
-       s3Endpoint      string
-       s3Replication   int
-       s3UnsafeDelete  bool
+func init() {
+       driver["S3"] = news3Volume
+}
 
-       s3ACL = s3.Private
+const (
+       s3DefaultReadTimeout        = arvados.Duration(10 * time.Minute)
+       s3DefaultConnectTimeout     = arvados.Duration(time.Minute)
+       maxClockSkew                = 600 * time.Second
+       nearlyRFC1123               = "Mon, 2 Jan 2006 15:04:05 GMT"
+       s3downloaderPartSize        = 6 * 1024 * 1024
+       s3downloaderReadConcurrency = 11
+       s3uploaderPartSize          = 5 * 1024 * 1024
+       s3uploaderWriteConcurrency  = 5
 )
 
-const (
-       maxClockSkew  = 600 * time.Second
-       nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
+var (
+       errS3TrashDisabled   = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
+       s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
+       s3AWSZeroTime        time.Time
 )
 
-type s3VolumeAdder struct {
-       *volumeSet
+// s3Volume implements Volume using an S3 bucket.
+type s3Volume struct {
+       arvados.S3VolumeDriverParameters
+       AuthToken      string    // populated automatically when IAMRole is used
+       AuthExpiration time.Time // populated automatically when IAMRole is used
+
+       cluster    *arvados.Cluster
+       volume     arvados.Volume
+       logger     logrus.FieldLogger
+       metrics    *volumeMetricsVecs
+       bufferPool *bufferPool
+       bucket     *s3Bucket
+       region     string
+       startOnce  sync.Once
 }
 
-func (s *s3VolumeAdder) Set(bucketName string) error {
-       if trashLifetime != 0 {
-               return ErrNotImplemented
-       }
-       if bucketName == "" {
-               return fmt.Errorf("no container name given")
-       }
-       if s3AccessKeyFile == "" || s3SecretKeyFile == "" {
-               return fmt.Errorf("-s3-access-key-file and -s3-secret-key-file arguments must given before -s3-bucket-volume")
+// s3Bucket wraps an s3.Client for a single bucket, and counts I/O
+// and API usage stats.
+type s3Bucket struct {
+       bucket string
+       svc    *s3.Client
+       stats  s3awsbucketStats
+       mu     sync.Mutex
+}
+
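+// isKeepBlock returns the hash portion of key s, and true if it
+// looks like a Keep block key: a 32-digit hex hash, optionally
+// preceded by a matching PrefixLength-character prefix and "/".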
+func (v *s3Volume) isKeepBlock(s string) (string, bool) {
+       if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
+               s = s[v.PrefixLength+1:]
        }
-       region, ok := aws.Regions[s3RegionName]
-       if s3Endpoint == "" {
-               if !ok {
-                       return fmt.Errorf("unrecognized region %+q; try specifying -s3-endpoint instead", s3RegionName)
-               }
+       return s, s3AWSKeepBlockRegexp.MatchString(s)
+}
+
+// key returns the object key used for a given loc. If PrefixLength==0
+// then key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key
+// is "abc/abcdef0123", etc.
+func (v *s3Volume) key(loc string) string {
+       if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
+               return loc[:v.PrefixLength] + "/" + loc
        } else {
-               if ok {
-                       return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
-                               "specify empty endpoint (\"-s3-endpoint=\") or use a different region name", s3RegionName, s3Endpoint)
-               }
-               region = aws.Region{
-                       Name:       s3RegionName,
-                       S3Endpoint: s3Endpoint,
-               }
+               return loc
        }
-       var err error
-       var auth aws.Auth
-       auth.AccessKey, err = readKeyFromFile(s3AccessKeyFile)
-       if err != nil {
-               return err
+}
+
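+// news3Volume is the constructor registered for the "S3" driver: it
+// unmarshals ConfigVolume.DriverParameters into a new s3Volume and
+// finishes setup via check.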
+func news3Volume(params newVolumeParams) (volume, error) {
+       v := &s3Volume{
+               cluster:    params.Cluster,
+               volume:     params.ConfigVolume,
+               metrics:    params.MetricsVecs,
+               bufferPool: params.BufferPool,
        }
-       auth.SecretKey, err = readKeyFromFile(s3SecretKeyFile)
+       err := json.Unmarshal(params.ConfigVolume.DriverParameters, v)
        if err != nil {
-               return err
+               return nil, err
+       }
+       v.logger = params.Logger.WithField("Volume", v.DeviceID())
+       return v, v.check("")
+}
+
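+// translateError converts an aws-sdk-go-v2 request-canceled error to
+// context.Canceled, and "not found" errors to os.ErrNotExist; other
+// errors are returned unchanged.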
+func (v *s3Volume) translateError(err error) error {
+       if _, ok := err.(*aws.RequestCanceledError); ok {
+               return context.Canceled
+       } else if aerr, ok := err.(awserr.Error); ok {
+               if aerr.Code() == "NotFound" {
+                       return os.ErrNotExist
+               } else if aerr.Code() == "NoSuchKey" {
+                       return os.ErrNotExist
+               }
        }
-       if flagSerializeIO {
-               log.Print("Notice: -serialize is not supported by s3-bucket volumes.")
+       return err
+}
+
+// safeCopy calls CopyObjectRequest, and checks the response to make
+// sure the copy succeeded and updated the timestamp on the
+// destination object.
+//
+// (If something goes wrong during the copy, the error will be
+// embedded in the 200 OK response)
+func (v *s3Volume) safeCopy(dst, src string) error {
+       input := &s3.CopyObjectInput{
+               Bucket:      aws.String(v.bucket.bucket),
+               ContentType: aws.String("application/octet-stream"),
+               CopySource:  aws.String(v.bucket.bucket + "/" + src),
+               Key:         aws.String(dst),
        }
-       v := NewS3Volume(auth, region, bucketName, flagReadonly, s3Replication)
-       if err := v.Check(); err != nil {
+
+       req := v.bucket.svc.CopyObjectRequest(input)
+       resp, err := req.Send(context.Background())
+
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
                return err
+       } else if err != nil {
+               return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err)
+       }
+
+       if resp.CopyObjectResult.LastModified == nil {
+               return fmt.Errorf("PutCopy(%q ← %q) succeeded but did not return a timestamp", dst, v.bucket.bucket+"/"+src)
+       } else if skew := time.Since(*resp.CopyObjectResult.LastModified); skew > maxClockSkew {
+               return fmt.Errorf("PutCopy(%q ← %q) succeeded but returned an old timestamp %s (%s ago)", dst, v.bucket.bucket+"/"+src, resp.CopyObjectResult.LastModified, skew)
        }
-       *s.volumeSet = append(*s.volumeSet, v)
        return nil
 }
 
-func s3regions() (okList []string) {
-       for r, _ := range aws.Regions {
-               okList = append(okList, r)
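+// check validates the volume's DriverParameters and fills in
+// defaults, then sets up the S3 client and Prometheus metrics. A
+// non-empty ec2metadataHostname overrides the EC2 instance metadata
+// endpoint (e.g., in tests).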
+func (v *s3Volume) check(ec2metadataHostname string) error {
+       if v.Bucket == "" {
+               return errors.New("DriverParameters: Bucket must be provided")
+       }
+       if v.IndexPageSize == 0 {
+               v.IndexPageSize = 1000
+       }
+       if v.RaceWindow < 0 {
+               return errors.New("DriverParameters: RaceWindow must not be negative")
        }
-       return
-}
 
-func init() {
-       flag.Var(&s3VolumeAdder{&volumes},
-               "s3-bucket-volume",
-               "Use the given bucket as a storage volume. Can be given multiple times.")
-       flag.StringVar(
-               &s3RegionName,
-               "s3-region",
-               "",
-               fmt.Sprintf("AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are %+q.", s3regions()))
-       flag.StringVar(
-               &s3Endpoint,
-               "s3-endpoint",
-               "",
-               "Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use \"https://storage.googleapis.com\".")
-       flag.StringVar(
-               &s3AccessKeyFile,
-               "s3-access-key-file",
-               "",
-               "File containing the access key used for subsequent -s3-bucket-volume arguments.")
-       flag.StringVar(
-               &s3SecretKeyFile,
-               "s3-secret-key-file",
-               "",
-               "File containing the secret key used for subsequent -s3-bucket-volume arguments.")
-       flag.IntVar(
-               &s3Replication,
-               "s3-replication",
-               2,
-               "Replication level reported to clients for subsequent -s3-bucket-volume arguments.")
-       flag.BoolVar(
-               &s3UnsafeDelete,
-               "s3-unsafe-delete",
-               false,
-               "EXPERIMENTAL. Enable deletion (garbage collection), even though there are known race conditions that can cause data loss.")
-}
-
-type S3Volume struct {
-       *s3.Bucket
-       readonly      bool
-       replication   int
-       indexPageSize int
-}
-
-// NewS3Volume returns a new S3Volume using the given auth, region,
-// and bucket name. The replication argument specifies the replication
-// level to report when writing data.
-func NewS3Volume(auth aws.Auth, region aws.Region, bucket string, readonly bool, replication int) *S3Volume {
-       return &S3Volume{
-               Bucket: &s3.Bucket{
-                       S3:   s3.New(auth, region),
-                       Name: bucket,
-               },
-               readonly:      readonly,
-               replication:   replication,
-               indexPageSize: 1000,
-       }
-}
-
-func (v *S3Volume) Check() error {
+       if v.V2Signature {
+               return errors.New("DriverParameters: V2Signature is not supported")
+       }
+
+       defaultResolver := endpoints.NewDefaultResolver()
+
+       cfg := defaults.Config()
+
+       if v.Endpoint == "" && v.Region == "" {
+               return errors.New("DriverParameters: Region or Endpoint must be provided")
+       } else if v.Endpoint != "" || ec2metadataHostname != "" {
+               myCustomResolver := func(service, region string) (aws.Endpoint, error) {
+                       if v.Endpoint != "" && service == "s3" {
+                               return aws.Endpoint{
+                                       URL:           v.Endpoint,
+                                       SigningRegion: region,
+                               }, nil
+                       } else if service == "ec2metadata" && ec2metadataHostname != "" {
+                               return aws.Endpoint{
+                                       URL: ec2metadataHostname,
+                               }, nil
+                       } else {
+                               return defaultResolver.ResolveEndpoint(service, region)
+                       }
+               }
+               cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
+       }
+       if v.Region == "" {
+               // Endpoint is already specified (otherwise we would
+               // have errored out above), but Region is also
+               // required by the aws sdk, in order to determine
+               // SignatureVersions.
+               v.Region = "us-east-1"
+       }
+       cfg.Region = v.Region
+
+       // Zero timeouts mean "wait forever", which is a bad
+       // default. Default to long timeouts instead.
+       if v.ConnectTimeout == 0 {
+               v.ConnectTimeout = s3DefaultConnectTimeout
+       }
+       if v.ReadTimeout == 0 {
+               v.ReadTimeout = s3DefaultReadTimeout
+       }
+
+       creds := aws.NewChainProvider(
+               []aws.CredentialsProvider{
+                       aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken),
+                       ec2rolecreds.New(ec2metadata.New(cfg), func(opts *ec2rolecreds.ProviderOptions) {
+                               // (from aws-sdk-go-v2 comments)
+                               // "allow the credentials to trigger
+                               // refreshing prior to the credentials
+                               // actually expiring. This is
+                               // beneficial so race conditions with
+                               // expiring credentials do not cause
+                               // request to fail unexpectedly due to
+                               // ExpiredTokenException exceptions."
+                               //
+                               // (from
+                               // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
+                               // "We make new credentials available
+                               // at least five minutes before the
+                               // expiration of the old credentials."
+                               opts.ExpiryWindow = 5 * time.Minute
+                       }),
+               })
+
+       cfg.Credentials = creds
+
+       v.bucket = &s3Bucket{
+               bucket: v.Bucket,
+               svc:    s3.New(cfg),
+       }
+
+       // Set up prometheus metrics
+       lbls := prometheus.Labels{"device_id": v.DeviceID()}
+       v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
+
        return nil
 }
 
-func (v *S3Volume) Get(loc string) ([]byte, error) {
-       rdr, err := v.Bucket.GetReader(loc)
-       if err != nil {
-               return nil, v.translateError(err)
+// DeviceID returns a globally unique ID for the storage bucket.
+func (v *s3Volume) DeviceID() string {
+       return "s3://" + v.Endpoint + "/" + v.Bucket
+}
+
+// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
+// and deletes them from the volume.
+func (v *s3Volume) EmptyTrash() {
+       var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
+
+       // Define "ready to delete" as "...when EmptyTrash started".
+       startT := time.Now()
+
+       emptyOneKey := func(trash *s3.Object) {
+               key := strings.TrimPrefix(*trash.Key, "trash/")
+               loc, isblk := v.isKeepBlock(key)
+               if !isblk {
+                       return
+               }
+               atomic.AddInt64(&bytesInTrash, *trash.Size)
+               atomic.AddInt64(&blocksInTrash, 1)
+
+               trashT := *trash.LastModified
+               recent, err := v.head("recent/" + key)
+               if err != nil && os.IsNotExist(v.translateError(err)) {
+                       v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
+                       err = v.BlockUntrash(loc)
+                       if err != nil {
+                               v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
+                       }
+                       return
+               } else if err != nil {
+                       v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
+                       return
+               }
+               if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
+                       if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
+                               // recent/key is too old to protect
+                               // loc from being Trashed again during
+                               // the raceWindow that starts if we
+                               // delete trash/X now.
+                               //
+                               // Note this means (TrashSweepInterval
+                               // < BlobSigningTTL - raceWindow) is
+                               // necessary to avoid starvation.
+                               v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
+                               v.fixRace(key)
+                               v.BlockTouch(loc)
+                               return
+                       }
+                       _, err := v.head(key)
+                       if os.IsNotExist(err) {
+                               v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
+                               v.fixRace(key)
+                               return
+                       } else if err != nil {
+                               v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
+                               return
+                       }
+               }
+               if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
+                       return
+               }
+               err = v.bucket.Del(*trash.Key)
+               if err != nil {
+                       v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
+                       return
+               }
+               atomic.AddInt64(&bytesDeleted, *trash.Size)
+               atomic.AddInt64(&blocksDeleted, 1)
+
+               _, err = v.head(*trash.Key)
+               if err == nil {
+                       v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
+                       return
+               }
+               if !os.IsNotExist(v.translateError(err)) {
+                       v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", key)
+                       return
+               }
+               err = v.bucket.Del("recent/" + key)
+               if err != nil {
+                       v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+key)
+               }
        }
-       defer rdr.Close()
-       buf := bufs.Get(BlockSize)
-       n, err := io.ReadFull(rdr, buf)
-       switch err {
-       case nil, io.EOF, io.ErrUnexpectedEOF:
-               return buf[:n], nil
-       default:
-               bufs.Put(buf)
-               return nil, v.translateError(err)
+
+       var wg sync.WaitGroup
+       todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
+       for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       for key := range todo {
+                               emptyOneKey(key)
+                       }
+               }()
+       }
+
+       trashL := s3awsLister{
+               Logger:   v.logger,
+               Bucket:   v.bucket,
+               Prefix:   "trash/",
+               PageSize: v.IndexPageSize,
+               Stats:    &v.bucket.stats,
+       }
+       for trash := trashL.First(); trash != nil; trash = trashL.Next() {
+               todo <- trash
+       }
+       close(todo)
+       wg.Wait()
+
+       if err := trashL.Error(); err != nil {
+               v.logger.WithError(err).Error("EmptyTrash: lister failed")
        }
+       v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
-func (v *S3Volume) Compare(loc string, expect []byte) error {
-       rdr, err := v.Bucket.GetReader(loc)
+// fixRace(X) is called when "recent/X" exists but "X" doesn't
+// exist. If the timestamps on "recent/X" and "trash/X" indicate there
+// was a race between Put and Trash, fixRace recovers from the race by
+// Untrashing the block.
+func (v *s3Volume) fixRace(key string) bool {
+       trash, err := v.head("trash/" + key)
        if err != nil {
-               return v.translateError(err)
+               if !os.IsNotExist(v.translateError(err)) {
+                       v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
+               }
+               return false
+       }
+
+       recent, err := v.head("recent/" + key)
+       if err != nil {
+               v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
+               return false
+       }
+
+       recentTime := *recent.LastModified
+       trashTime := *trash.LastModified
+       ageWhenTrashed := trashTime.Sub(recentTime)
+       if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
+               // No evidence of a race: block hasn't been written
+               // since it became eligible for Trash. No fix needed.
+               return false
        }
-       defer rdr.Close()
-       return v.translateError(compareReaderWithBuf(rdr, expect, loc[:32]))
+
+       v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
+       v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
+       err = v.safeCopy(key, "trash/"+key)
+       if err != nil {
+               v.logger.WithError(err).Error("fixRace: copy failed")
+               return false
+       }
+       return true
 }
 
-func (v *S3Volume) Put(loc string, block []byte) error {
-       if v.readonly {
-               return MethodDisabledError
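+// head performs a HEAD request for the given key and returns the
+// response, or a translated error.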
+func (v *s3Volume) head(key string) (result *s3.HeadObjectOutput, err error) {
+       input := &s3.HeadObjectInput{
+               Bucket: aws.String(v.bucket.bucket),
+               Key:    aws.String(key),
        }
-       var opts s3.Options
-       if len(block) > 0 {
-               md5, err := hex.DecodeString(loc)
+
+       req := v.bucket.svc.HeadObjectRequest(input)
+       res, err := req.Send(context.TODO())
+
+       v.bucket.stats.TickOps("head")
+       v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
+       v.bucket.stats.TickErr(err)
+
+       if err != nil {
+               return nil, v.translateError(err)
+       }
+       result = res.HeadObjectOutput
+       return
+}
+
+// BlockRead reads a Keep block that has been stored as an object in
+// the S3 bucket.
+func (v *s3Volume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+       key := v.key(hash)
+       err := v.readWorker(ctx, key, w)
+       if err != nil {
+               err = v.translateError(err)
+               if !os.IsNotExist(err) {
+                       return err
+               }
+
+               _, err = v.head("recent/" + key)
+               err = v.translateError(err)
+               if err != nil {
+                       // If we can't read recent/X, there's no point in
+                       // trying fixRace. Give up.
+                       return err
+               }
+               if !v.fixRace(key) {
+                       err = os.ErrNotExist
+                       return err
+               }
+
+               err = v.readWorker(ctx, key, w)
                if err != nil {
+                       v.logger.Warnf("reading %s after successful fixRace: %s", hash, err)
+                       err = v.translateError(err)
                        return err
                }
-               opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5)
        }
-       return v.translateError(
-               v.Bucket.Put(
-                       loc, block, "application/octet-stream", s3ACL, opts))
+       return nil
 }
 
-func (v *S3Volume) Touch(loc string) error {
-       if v.readonly {
-               return MethodDisabledError
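+// readWorker downloads the object with the given key into dst using
+// the s3manager concurrent downloader, and updates I/O stats.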
+func (v *s3Volume) readWorker(ctx context.Context, key string, dst io.WriterAt) error {
+       downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
+               u.PartSize = s3downloaderPartSize
+               u.Concurrency = s3downloaderReadConcurrency
+       })
+       count, err := downloader.DownloadWithContext(ctx, dst, &s3.GetObjectInput{
+               Bucket: aws.String(v.bucket.bucket),
+               Key:    aws.String(key),
+       })
+       v.bucket.stats.TickOps("get")
+       v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
+       v.bucket.stats.TickErr(err)
+       v.bucket.stats.TickInBytes(uint64(count))
+       return v.translateError(err)
+}
+
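+// writeObject uploads the contents of r to the given key using the
+// s3manager concurrent uploader, setting a Content-MD5 header when
+// the key names a Keep block.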
+func (v *s3Volume) writeObject(ctx context.Context, key string, r io.Reader) error {
+       if r == nil {
+               // r == nil leads to a memory violation in func readFillBuf in
+               // aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
+               r = bytes.NewReader(nil)
        }
-       result, err := v.Bucket.PutCopy(loc, s3ACL, s3.CopyOptions{
-               ContentType:       "application/octet-stream",
-               MetadataDirective: "REPLACE",
-       }, v.Bucket.Name+"/"+loc)
-       if err != nil {
-               return v.translateError(err)
+
+       uploadInput := s3manager.UploadInput{
+               Bucket: aws.String(v.bucket.bucket),
+               Key:    aws.String(key),
+               Body:   r,
        }
-       t, err := time.Parse(time.RFC3339, result.LastModified)
+
+       if loc, ok := v.isKeepBlock(key); ok {
+               var contentMD5 string
+               md5, err := hex.DecodeString(loc)
+               if err != nil {
+                       return v.translateError(err)
+               }
+               contentMD5 = base64.StdEncoding.EncodeToString(md5)
+               uploadInput.ContentMD5 = &contentMD5
+       }
+
+       // Experimentation indicated that using concurrency 5 yields the best
+       // throughput, better than higher concurrency (10 or 13) by ~5%.
+       // Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
+       // is detrimental to throughput (minus ~15%).
+       uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
+               u.PartSize = s3uploaderPartSize
+               u.Concurrency = s3uploaderWriteConcurrency
+       })
+
+       // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
+       // the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
+       // block, so there is no extra memory use to be concerned about. See
+       // makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
+// calculating the SHA-256 because we don't need it; we already use md5sum
+       // hashes that match the name of the block.
+       _, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
+               r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
+       }))
+
+       v.bucket.stats.TickOps("put")
+       v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
+       v.bucket.stats.TickErr(err)
+
+       return v.translateError(err)
+}
+
+// BlockWrite writes a block.
+func (v *s3Volume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+       // Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
+       // sdk to avoid memory allocation there. See #17339 for more information.
+       rdr := bytes.NewReader(data)
+       r := newCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
+       key := v.key(hash)
+       err := v.writeObject(ctx, key, r)
        if err != nil {
                return err
        }
-       if time.Since(t) > maxClockSkew {
-               return fmt.Errorf("PutCopy returned old LastModified %s => %s (%s ago)", result.LastModified, t, time.Since(t))
+       return v.writeObject(ctx, "recent/"+key, nil)
+}
+
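+// s3awsLister iterates over the keys in a bucket that begin with
+// Prefix, fetching one page of ListObjectsV2 results at a time.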
+type s3awsLister struct {
+       Logger            logrus.FieldLogger
+       Bucket            *s3Bucket
+       Prefix            string
+       PageSize          int
+       Stats             *s3awsbucketStats
+       ContinuationToken string
+       buf               []s3.Object
+       err               error
+}
+
+// First fetches the first page and returns the first item. It returns
+// nil if the response is the empty set or an error occurs.
+func (lister *s3awsLister) First() *s3.Object {
+       lister.getPage()
+       return lister.pop()
+}
+
+// Next returns the next item, fetching the next page if necessary. It
+// returns nil if the last available item has already been fetched, or
+// an error occurs.
+func (lister *s3awsLister) Next() *s3.Object {
+       if len(lister.buf) == 0 && lister.ContinuationToken != "" {
+               lister.getPage()
        }
-       return nil
+       return lister.pop()
+}
+
+// Error returns the most recent error encountered by First or Next.
+func (lister *s3awsLister) Error() error {
+       return lister.err
 }
 
-func (v *S3Volume) Mtime(loc string) (time.Time, error) {
-       resp, err := v.Bucket.Head(loc, nil)
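+// getPage fetches the next page of list results into lister.buf and
+// saves the continuation token for the following page, if any.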
+func (lister *s3awsLister) getPage() {
+       lister.Stats.TickOps("list")
+       lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
+
+       var input *s3.ListObjectsV2Input
+       if lister.ContinuationToken == "" {
+               input = &s3.ListObjectsV2Input{
+                       Bucket:  aws.String(lister.Bucket.bucket),
+                       MaxKeys: aws.Int64(int64(lister.PageSize)),
+                       Prefix:  aws.String(lister.Prefix),
+               }
+       } else {
+               input = &s3.ListObjectsV2Input{
+                       Bucket:            aws.String(lister.Bucket.bucket),
+                       MaxKeys:           aws.Int64(int64(lister.PageSize)),
+                       Prefix:            aws.String(lister.Prefix),
+                       ContinuationToken: &lister.ContinuationToken,
+               }
+       }
+
+       req := lister.Bucket.svc.ListObjectsV2Request(input)
+       resp, err := req.Send(context.Background())
        if err != nil {
-               return zeroTime, v.translateError(err)
+               lister.err = err
+               return
        }
-       hdr := resp.Header.Get("Last-Modified")
-       t, err := time.Parse(time.RFC1123, hdr)
-       if err != nil && hdr != "" {
-               // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
-               // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
-               // as required by HTTP spec. If it's not a valid HTTP
-               // header value, it's probably AWS (or s3test) giving
-               // us a nearly-RFC1123 timestamp.
-               t, err = time.Parse(nearlyRFC1123, hdr)
+
+       if *resp.IsTruncated {
+               lister.ContinuationToken = *resp.NextContinuationToken
+       } else {
+               lister.ContinuationToken = ""
+       }
+       lister.buf = make([]s3.Object, 0, len(resp.Contents))
+       for _, key := range resp.Contents {
+               if !strings.HasPrefix(*key.Key, lister.Prefix) {
+                       lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
+                       continue
+               }
+               lister.buf = append(lister.buf, key)
        }
-       return t, err
 }
 
-func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
-       nextMarker := ""
-       for {
-               listResp, err := v.Bucket.List(prefix, "", nextMarker, v.indexPageSize)
-               if err != nil {
-                       return err
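+// pop returns the next buffered list item, or nil if the buffer is
+// empty.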
+func (lister *s3awsLister) pop() (k *s3.Object) {
+       if len(lister.buf) > 0 {
+               k = &lister.buf[0]
+               lister.buf = lister.buf[1:]
+       }
+       return
+}
+
+// Index writes a complete list of locators with the given prefix
+// for which BlockRead() can retrieve data.
+func (v *s3Volume) Index(ctx context.Context, prefix string, writer io.Writer) error {
+       prefix = v.key(prefix)
+       // Use a merge sort to find matching sets of X and recent/X.
+       dataL := s3awsLister{
+               Logger:   v.logger,
+               Bucket:   v.bucket,
+               Prefix:   prefix,
+               PageSize: v.IndexPageSize,
+               Stats:    &v.bucket.stats,
+       }
+       recentL := s3awsLister{
+               Logger:   v.logger,
+               Bucket:   v.bucket,
+               Prefix:   "recent/" + prefix,
+               PageSize: v.IndexPageSize,
+               Stats:    &v.bucket.stats,
+       }
+       for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
+               if ctx.Err() != nil {
+                       return ctx.Err()
                }
-               for _, key := range listResp.Contents {
-                       t, err := time.Parse(time.RFC3339, key.LastModified)
-                       if err != nil {
-                               return err
-                       }
-                       if !v.isKeepBlock(key.Key) {
+               if *data.Key >= "g" {
+                       // Conveniently, "recent/*" and "trash/*" are
+                       // lexically greater than all hex-encoded data
+                       // hashes, so stopping here avoids iterating
+                       // over all of them needlessly with dataL.
+                       break
+               }
+               loc, isblk := v.isKeepBlock(*data.Key)
+               if !isblk {
+                       continue
+               }
+
+               // stamp is the list entry we should use to report the
+               // last-modified time for this data block: it will be
+               // the recent/X entry if one exists, otherwise the
+               // entry for the data block itself.
+               stamp := data
+
+               // Advance to the corresponding recent/X marker, if any
+               for recent != nil && recentL.Error() == nil {
+                       if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
+                               recent = recentL.Next()
                                continue
+                       } else if cmp == 0 {
+                               stamp = recent
+                               recent = recentL.Next()
+                               break
+                       } else {
+                               // recent/X marker is missing: we'll
+                               // use the timestamp on the data
+                               // object.
+                               break
                        }
-                       fmt.Fprintf(writer, "%s+%d %d\n", key.Key, key.Size, t.Unix())
                }
-               if !listResp.IsTruncated {
-                       break
+               if err := recentL.Error(); err != nil {
+                       return err
                }
-               nextMarker = listResp.NextMarker
+               // We truncate sub-second precision here. Otherwise
+               // timestamps will never match the RFC1123-formatted
+               // Last-Modified values parsed by Mtime().
+               fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000)
        }
-       return nil
+       return dataL.Error()
 }
 
-// Trash a Keep block.
-func (v *S3Volume) Trash(loc string) error {
-       if v.readonly {
-               return MethodDisabledError
-       }
-       if trashLifetime != 0 {
-               return ErrNotImplemented
-       }
-       if t, err := v.Mtime(loc); err != nil {
-               return err
-       } else if time.Since(t) < blobSignatureTTL {
-               return nil
+// Mtime returns the stored timestamp for the given locator.
+func (v *s3Volume) Mtime(loc string) (time.Time, error) {
+       key := v.key(loc)
+       _, err := v.head(key)
+       if err != nil {
+               return s3AWSZeroTime, v.translateError(err)
        }
-       if !s3UnsafeDelete {
-               return ErrS3DeleteNotAvailable
+       resp, err := v.head("recent/" + key)
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
+               // The data object X exists, but recent/X is missing.
+               err = v.writeObject(context.Background(), "recent/"+key, nil)
+               if err != nil {
+                       v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
+                       return s3AWSZeroTime, v.translateError(err)
+               }
+               v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key)
+               resp, err = v.head("recent/" + key)
+               if err != nil {
+                       v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
+                       return s3AWSZeroTime, v.translateError(err)
+               }
+       } else if err != nil {
+               // HEAD recent/X failed for some other reason.
+               return s3AWSZeroTime, err
        }
-       return v.Bucket.Del(loc)
+       return *resp.LastModified, err
 }
 
-// TBD
-func (v *S3Volume) Untrash(loc string) error {
-       return ErrNotImplemented
+// InternalStats returns bucket I/O and API call counters.
+func (v *s3Volume) InternalStats() interface{} {
+       return &v.bucket.stats
 }
 
-func (v *S3Volume) Status() *VolumeStatus {
-       return &VolumeStatus{
-               DeviceNum: 1,
-               BytesFree: BlockSize * 1000,
-               BytesUsed: 1,
+// BlockTouch sets the timestamp for the given locator to the current time.
+func (v *s3Volume) BlockTouch(hash string) error {
+       key := v.key(hash)
+       _, err := v.head(key)
+       err = v.translateError(err)
+       if os.IsNotExist(err) && v.fixRace(key) {
+               // The data object got trashed in a race, but fixRace
+               // rescued it.
+       } else if err != nil {
+               return err
        }
+       err = v.writeObject(context.Background(), "recent/"+key, nil)
+       return v.translateError(err)
 }
 
-func (v *S3Volume) String() string {
-       return fmt.Sprintf("s3-bucket:%+q", v.Bucket.Name)
+// checkRaceWindow returns a non-nil error if trash/key is, or might
+// be, in the race window (i.e., it's not safe to trash key).
+func (v *s3Volume) checkRaceWindow(key string) error {
+       resp, err := v.head("trash/" + key)
+       err = v.translateError(err)
+       if os.IsNotExist(err) {
+               // OK, trash/X doesn't exist so we're not in the race
+               // window
+               return nil
+       } else if err != nil {
+               // Error looking up trash/X. We don't know whether
+               // we're in the race window
+               return err
+       }
+       t := resp.LastModified
+       safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
+       if safeWindow <= 0 {
+               // We can't count on "touch trash/X" to prolong
+               // trash/X's lifetime. The new timestamp might not
+               // become visible until now+raceWindow, and EmptyTrash
+               // is allowed to delete trash/X before then.
+               return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
+       }
+       // trash/X exists, but it won't be eligible for deletion until
+       // after now+raceWindow, so it's safe to overwrite it.
+       return nil
 }
 
-func (v *S3Volume) Writable() bool {
-       return !v.readonly
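+// Del deletes the object at the given path and updates API usage
+// stats.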
+func (b *s3Bucket) Del(path string) error {
+       input := &s3.DeleteObjectInput{
+               Bucket: aws.String(b.bucket),
+               Key:    aws.String(path),
+       }
+       req := b.svc.DeleteObjectRequest(input)
+       _, err := req.Send(context.Background())
+       b.stats.TickOps("delete")
+       b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
+       b.stats.TickErr(err)
+       return err
 }
-func (v *S3Volume) Replication() int {
-       return v.replication
+
+// BlockTrash trashes a Keep block.
+func (v *s3Volume) BlockTrash(loc string) error {
+       if t, err := v.Mtime(loc); err != nil {
+               return err
+       } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
+               return nil
+       }
+       key := v.key(loc)
+       if v.cluster.Collections.BlobTrashLifetime == 0 {
+               if !v.UnsafeDelete {
+                       return errS3TrashDisabled
+               }
+               return v.translateError(v.bucket.Del(key))
+       }
+       err := v.checkRaceWindow(key)
+       if err != nil {
+               return err
+       }
+       err = v.safeCopy("trash/"+key, key)
+       if err != nil {
+               return err
+       }
+       return v.translateError(v.bucket.Del(key))
 }
 
-var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
+// BlockUntrash moves a block from the trash back into the store.
+func (v *s3Volume) BlockUntrash(hash string) error {
+       key := v.key(hash)
+       err := v.safeCopy(key, "trash/"+key)
+       if err != nil {
+               return err
+       }
+       err = v.writeObject(context.Background(), "recent/"+key, nil)
+       return v.translateError(err)
+}
 
-func (v *S3Volume) isKeepBlock(s string) bool {
-       return s3KeepBlockRegexp.MatchString(s)
+type s3awsbucketStats struct {
+       statsTicker
+       Ops     uint64
+       GetOps  uint64
+       PutOps  uint64
+       HeadOps uint64
+       DelOps  uint64
+       ListOps uint64
 }
 
-func (v *S3Volume) translateError(err error) error {
-       switch err := err.(type) {
-       case *s3.Error:
-               if err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey" {
-                       return os.ErrNotExist
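+// TickErr increments the error counter labeled with the error's Go
+// type and, for AWS errors, the HTTP status code and AWS error code.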
+func (s *s3awsbucketStats) TickErr(err error) {
+       if err == nil {
+               return
+       }
+       errType := fmt.Sprintf("%T", err)
+       if aerr, ok := err.(awserr.Error); ok {
+               if reqErr, ok := err.(awserr.RequestFailure); ok {
+                       // A service error occurred
+                       errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
+               } else {
+                       errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
                }
-               // Other 404 errors like NoSuchVersion and
-               // NoSuchBucket are different problems which should
-               // get called out downstream, so we don't convert them
-               // to os.ErrNotExist.
        }
-       return err
-}
-
-// EmptyTrash looks for trashed blocks that exceeded trashLifetime
-// and deletes them from the volume.
-// TBD
-func (v *S3Volume) EmptyTrash() {
+       s.statsTicker.TickErr(err, errType)
 }