-package main
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
import (
+ "bytes"
"context"
"encoding/base64"
"encoding/hex"
- "flag"
+ "encoding/json"
+ "errors"
"fmt"
"io"
- "log"
- "net/http"
"os"
"regexp"
"strings"
"sync"
+ "sync/atomic"
"time"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/AdRoll/goamz/aws"
- "github.com/AdRoll/goamz/s3"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/awserr"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/aws/endpoints"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
)
-var (
- // ErrS3TrashDisabled is returned by Trash if that operation
- // is impossible with the current config.
- ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because -trash-lifetime=0 and -s3-unsafe-delete=false")
-
- s3AccessKeyFile string
- s3SecretKeyFile string
- s3RegionName string
- s3Endpoint string
- s3Replication int
- s3UnsafeDelete bool
- s3RaceWindow time.Duration
-
- s3ACL = s3.Private
-)
+func init() {
+ driver["S3"] = news3Volume
+}
const (
- maxClockSkew = 600 * time.Second
- nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
+ s3DefaultReadTimeout = arvados.Duration(10 * time.Minute)
+ s3DefaultConnectTimeout = arvados.Duration(time.Minute)
+ maxClockSkew = 600 * time.Second
+ nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
+ s3downloaderPartSize = 6 * 1024 * 1024
+ s3downloaderReadConcurrency = 11
+ s3uploaderPartSize = 5 * 1024 * 1024
+ s3uploaderWriteConcurrency = 5
)
-type s3VolumeAdder struct {
- *Config
+var (
+	errS3TrashDisabled = errors.New("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
+ s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
+ s3AWSZeroTime time.Time
+)
+
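+// An s3Volume stores each block as an S3 object named after the
+// block's hash, optionally sharded under a PrefixLength-character
+// prefix (see key()). Two companion objects track block state:
+// "recent/X" records the last time X was written or touched, and
+// "trash/X" holds X's data while it is trashed. fixRace, EmptyTrash,
+// and checkRaceWindow compare the timestamps of these objects to
+// recover from races between writers and the trash sweeper.
+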
+// s3Volume implements Volume using an S3 bucket.
+type s3Volume struct {
+ arvados.S3VolumeDriverParameters
+ AuthToken string // populated automatically when IAMRole is used
+ AuthExpiration time.Time // populated automatically when IAMRole is used
+
+ cluster *arvados.Cluster
+ volume arvados.Volume
+ logger logrus.FieldLogger
+ metrics *volumeMetricsVecs
+ bufferPool *bufferPool
+ bucket *s3Bucket
+ region string
+ startOnce sync.Once
+}
+
+// s3Bucket wraps an s3.Client and counts I/O and API usage stats.
+// The wrapped client can be replaced (with mu held) in order to
+// update credentials.
+type s3Bucket struct {
+ bucket string
+ svc *s3.Client
+ stats s3awsbucketStats
+ mu sync.Mutex
+}
+
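+// isKeepBlock strips the optional PrefixLength-character shard
+// prefix from s, and returns the remainder along with a flag
+// reporting whether it looks like a Keep block hash (32 hex digits).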
+func (v *s3Volume) isKeepBlock(s string) (string, bool) {
+ if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
+ s = s[v.PrefixLength+1:]
+ }
+ return s, s3AWSKeepBlockRegexp.MatchString(s)
+}
+
+// Return the key used for a given loc. If PrefixLength==0 then
+// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
+// "abc/abcdef0123", etc.
+func (v *s3Volume) key(loc string) string {
+ if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
+ return loc[:v.PrefixLength] + "/" + loc
+ } else {
+ return loc
+ }
}
-// String implements flag.Value
-func (s *s3VolumeAdder) String() string {
- return "-"
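+// news3Volume builds an s3Volume from the DriverParameters in the
+// given volume config, then verifies the resulting configuration.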
+func news3Volume(params newVolumeParams) (volume, error) {
+ v := &s3Volume{
+ cluster: params.Cluster,
+ volume: params.ConfigVolume,
+ metrics: params.MetricsVecs,
+ bufferPool: params.BufferPool,
+ }
+ err := json.Unmarshal(params.ConfigVolume.DriverParameters, v)
+ if err != nil {
+ return nil, err
+ }
+ v.logger = params.Logger.WithField("Volume", v.DeviceID())
+ return v, v.check("")
+}
+
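+// translateError converts an aws-sdk-go-v2 error to os.ErrNotExist
+// or context.Canceled where appropriate, so callers can test it with
+// os.IsNotExist just like errors from other volume drivers.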
+func (v *s3Volume) translateError(err error) error {
+ if _, ok := err.(*aws.RequestCanceledError); ok {
+ return context.Canceled
+ } else if aerr, ok := err.(awserr.Error); ok {
+ if aerr.Code() == "NotFound" {
+ return os.ErrNotExist
+ } else if aerr.Code() == "NoSuchKey" {
+ return os.ErrNotExist
+ }
+ }
+ return err
}
-func (s *s3VolumeAdder) Set(bucketName string) error {
- if bucketName == "" {
- return fmt.Errorf("no container name given")
+// safeCopy calls CopyObjectRequest, and checks the response to make
+// sure the copy succeeded and updated the timestamp on the
+// destination object
+//
+// (If something goes wrong during the copy, the error will be
+// embedded in the 200 OK response)
+func (v *s3Volume) safeCopy(dst, src string) error {
+ input := &s3.CopyObjectInput{
+ Bucket: aws.String(v.bucket.bucket),
+ ContentType: aws.String("application/octet-stream"),
+ CopySource: aws.String(v.bucket.bucket + "/" + src),
+ Key: aws.String(dst),
}
- if s3AccessKeyFile == "" || s3SecretKeyFile == "" {
- return fmt.Errorf("-s3-access-key-file and -s3-secret-key-file arguments must given before -s3-bucket-volume")
+
+ req := v.bucket.svc.CopyObjectRequest(input)
+ resp, err := req.Send(context.Background())
+
+ err = v.translateError(err)
+ if os.IsNotExist(err) {
+ return err
+ } else if err != nil {
+ return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err)
}
- if deprecated.flagSerializeIO {
- log.Print("Notice: -serialize is not supported by s3-bucket volumes.")
+
+	if resp.CopyObjectResult.LastModified == nil {
+		return fmt.Errorf("PutCopy succeeded but did not return a timestamp for %q", dst)
+	} else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew {
+		return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %s", resp.CopyObjectResult.LastModified)
}
- s.Config.Volumes = append(s.Config.Volumes, &S3Volume{
- Bucket: bucketName,
- AccessKeyFile: s3AccessKeyFile,
- SecretKeyFile: s3SecretKeyFile,
- Endpoint: s3Endpoint,
- Region: s3RegionName,
- RaceWindow: arvados.Duration(s3RaceWindow),
- S3Replication: s3Replication,
- UnsafeDelete: s3UnsafeDelete,
- ReadOnly: deprecated.flagReadonly,
- IndexPageSize: 1000,
- })
return nil
}
-func s3regions() (okList []string) {
- for r := range aws.Regions {
- okList = append(okList, r)
+func (v *s3Volume) check(ec2metadataHostname string) error {
+ if v.Bucket == "" {
+ return errors.New("DriverParameters: Bucket must be provided")
+ }
+ if v.IndexPageSize == 0 {
+ v.IndexPageSize = 1000
+ }
+ if v.RaceWindow < 0 {
+ return errors.New("DriverParameters: RaceWindow must not be negative")
}
- return
-}
-func init() {
- VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &S3Volume{} })
-
- flag.Var(&s3VolumeAdder{theConfig},
- "s3-bucket-volume",
- "Use the given bucket as a storage volume. Can be given multiple times.")
- flag.StringVar(
- &s3RegionName,
- "s3-region",
- "",
- fmt.Sprintf("AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are %+q.", s3regions()))
- flag.StringVar(
- &s3Endpoint,
- "s3-endpoint",
- "",
- "Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use \"https://storage.googleapis.com\".")
- flag.StringVar(
- &s3AccessKeyFile,
- "s3-access-key-file",
- "",
- "`File` containing the access key used for subsequent -s3-bucket-volume arguments.")
- flag.StringVar(
- &s3SecretKeyFile,
- "s3-secret-key-file",
- "",
- "`File` containing the secret key used for subsequent -s3-bucket-volume arguments.")
- flag.DurationVar(
- &s3RaceWindow,
- "s3-race-window",
- 24*time.Hour,
- "Maximum eventual consistency latency for subsequent -s3-bucket-volume arguments.")
- flag.IntVar(
- &s3Replication,
- "s3-replication",
- 2,
- "Replication level reported to clients for subsequent -s3-bucket-volume arguments.")
- flag.BoolVar(
- &s3UnsafeDelete,
- "s3-unsafe-delete",
- false,
- "EXPERIMENTAL. Enable deletion (garbage collection), even though there are known race conditions that can cause data loss.")
-}
-
-// S3Volume implements Volume using an S3 bucket.
-type S3Volume struct {
- AccessKeyFile string
- SecretKeyFile string
- Endpoint string
- Region string
- Bucket string
- LocationConstraint bool
- IndexPageSize int
- S3Replication int
- ConnectTimeout arvados.Duration
- ReadTimeout arvados.Duration
- RaceWindow arvados.Duration
- ReadOnly bool
- UnsafeDelete bool
-
- bucket *s3.Bucket
-
- startOnce sync.Once
-}
-
-// Examples implements VolumeWithExamples.
-func (*S3Volume) Examples() []Volume {
- return []Volume{
- &S3Volume{
- AccessKeyFile: "/etc/aws_s3_access_key.txt",
- SecretKeyFile: "/etc/aws_s3_secret_key.txt",
- Endpoint: "",
- Region: "us-east-1",
- Bucket: "example-bucket-name",
- IndexPageSize: 1000,
- S3Replication: 2,
- RaceWindow: arvados.Duration(24 * time.Hour),
- ConnectTimeout: arvados.Duration(time.Minute),
- ReadTimeout: arvados.Duration(5 * time.Minute),
- },
- &S3Volume{
- AccessKeyFile: "/etc/gce_s3_access_key.txt",
- SecretKeyFile: "/etc/gce_s3_secret_key.txt",
- Endpoint: "https://storage.googleapis.com",
- Region: "",
- Bucket: "example-bucket-name",
- IndexPageSize: 1000,
- S3Replication: 2,
- RaceWindow: arvados.Duration(24 * time.Hour),
- ConnectTimeout: arvados.Duration(time.Minute),
- ReadTimeout: arvados.Duration(5 * time.Minute),
- },
- }
-}
-
-// Type implements Volume.
-func (*S3Volume) Type() string {
- return "S3"
-}
-
-// Start populates private fields and verifies the configuration is
-// valid.
-func (v *S3Volume) Start() error {
- region, ok := aws.Regions[v.Region]
- if v.Endpoint == "" {
- if !ok {
- return fmt.Errorf("unrecognized region %+q; try specifying -s3-endpoint instead", v.Region)
- }
- } else if ok {
- return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
- "specify empty endpoint (\"-s3-endpoint=\") or use a different region name", v.Region, v.Endpoint)
- } else {
- region = aws.Region{
- Name: v.Region,
- S3Endpoint: v.Endpoint,
- S3LocationConstraint: v.LocationConstraint,
- }
+ if v.V2Signature {
+ return errors.New("DriverParameters: V2Signature is not supported")
}
- var err error
- var auth aws.Auth
- auth.AccessKey, err = readKeyFromFile(v.AccessKeyFile)
- if err != nil {
- return err
+ defaultResolver := endpoints.NewDefaultResolver()
+
+ cfg := defaults.Config()
+
+ if v.Endpoint == "" && v.Region == "" {
+ return fmt.Errorf("AWS region or endpoint must be specified")
+ } else if v.Endpoint != "" || ec2metadataHostname != "" {
+ myCustomResolver := func(service, region string) (aws.Endpoint, error) {
+ if v.Endpoint != "" && service == "s3" {
+ return aws.Endpoint{
+ URL: v.Endpoint,
+ SigningRegion: region,
+ }, nil
+ } else if service == "ec2metadata" && ec2metadataHostname != "" {
+ return aws.Endpoint{
+ URL: ec2metadataHostname,
+ }, nil
+ } else {
+ return defaultResolver.ResolveEndpoint(service, region)
+ }
+ }
+ cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
}
- auth.SecretKey, err = readKeyFromFile(v.SecretKeyFile)
- if err != nil {
- return err
+ if v.Region == "" {
+ // Endpoint is already specified (otherwise we would
+ // have errored out above), but Region is also
+ // required by the aws sdk, in order to determine
+ // SignatureVersions.
+ v.Region = "us-east-1"
}
+ cfg.Region = v.Region
// Zero timeouts mean "wait forever", which is a bad
// default. Default to long timeouts instead.
if v.ConnectTimeout == 0 {
- v.ConnectTimeout = arvados.Duration(time.Minute)
+ v.ConnectTimeout = s3DefaultConnectTimeout
}
if v.ReadTimeout == 0 {
- v.ReadTimeout = arvados.Duration(10 * time.Minute)
- }
+ v.ReadTimeout = s3DefaultReadTimeout
+ }
+
+ creds := aws.NewChainProvider(
+ []aws.CredentialsProvider{
+ aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken),
+ ec2rolecreds.New(ec2metadata.New(cfg), func(opts *ec2rolecreds.ProviderOptions) {
+ // (from aws-sdk-go-v2 comments)
+ // "allow the credentials to trigger
+ // refreshing prior to the credentials
+ // actually expiring. This is
+ // beneficial so race conditions with
+ // expiring credentials do not cause
+ // request to fail unexpectedly due to
+ // ExpiredTokenException exceptions."
+ //
+ // (from
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
+ // "We make new credentials available
+ // at least five minutes before the
+ // expiration of the old credentials."
+ opts.ExpiryWindow = 5 * time.Minute
+ }),
+ })
+
+ cfg.Credentials = creds
- client := s3.New(auth, region)
- client.ConnectTimeout = time.Duration(v.ConnectTimeout)
- client.ReadTimeout = time.Duration(v.ReadTimeout)
- v.bucket = &s3.Bucket{
- S3: client,
- Name: v.Bucket,
+ v.bucket = &s3Bucket{
+ bucket: v.Bucket,
+ svc: s3.New(cfg),
}
+
+ // Set up prometheus metrics
+ lbls := prometheus.Labels{"device_id": v.DeviceID()}
+ v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
+
return nil
}
-// getReader wraps (Bucket)GetReader.
-//
-// In situations where (Bucket)GetReader would fail because the block
-// disappeared in a Trash race, getReader calls fixRace to recover the
-// data, and tries again.
-func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
- rdr, err = v.bucket.GetReader(loc)
- err = v.translateError(err)
- if err == nil || !os.IsNotExist(err) {
- return
+// DeviceID returns a globally unique ID for the storage bucket.
+func (v *s3Volume) DeviceID() string {
+ return "s3://" + v.Endpoint + "/" + v.Bucket
+}
+
+// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
+// and deletes them from the volume.
+func (v *s3Volume) EmptyTrash() {
+ var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
+
+ // Define "ready to delete" as "...when EmptyTrash started".
+ startT := time.Now()
+
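+	// emptyOneKey deletes the given trash/X object if it has been
+	// in the trash longer than BlobTrashLifetime, cleaning up its
+	// recent/X marker and recovering blocks that were trashed in
+	// a race (see fixRace).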
+ emptyOneKey := func(trash *s3.Object) {
+ key := strings.TrimPrefix(*trash.Key, "trash/")
+ loc, isblk := v.isKeepBlock(key)
+ if !isblk {
+ return
+ }
+ atomic.AddInt64(&bytesInTrash, *trash.Size)
+ atomic.AddInt64(&blocksInTrash, 1)
+
+ trashT := *trash.LastModified
+ recent, err := v.head("recent/" + key)
+ if err != nil && os.IsNotExist(v.translateError(err)) {
+ v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
+ err = v.BlockUntrash(loc)
+ if err != nil {
+ v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
+ }
+ return
+ } else if err != nil {
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
+ return
+ }
+ if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
+ if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
+ // recent/key is too old to protect
+ // loc from being Trashed again during
+ // the raceWindow that starts if we
+ // delete trash/X now.
+ //
+ // Note this means (TrashSweepInterval
+ // < BlobSigningTTL - raceWindow) is
+ // necessary to avoid starvation.
+ v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
+ v.fixRace(key)
+ v.BlockTouch(loc)
+ return
+ }
+ _, err := v.head(key)
+ if os.IsNotExist(err) {
+ v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
+ v.fixRace(key)
+ return
+ } else if err != nil {
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
+ return
+ }
+ }
+ if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
+ return
+ }
+ err = v.bucket.Del(*trash.Key)
+ if err != nil {
+ v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
+ return
+ }
+ atomic.AddInt64(&bytesDeleted, *trash.Size)
+ atomic.AddInt64(&blocksDeleted, 1)
+
+ _, err = v.head(*trash.Key)
+ if err == nil {
+			v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", *trash.Key, *trash.Key)
+ return
+ }
+ if !os.IsNotExist(v.translateError(err)) {
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", key)
+ return
+ }
+ err = v.bucket.Del("recent/" + key)
+ if err != nil {
+ v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+key)
+ }
}
- _, err = v.bucket.Head("recent/"+loc, nil)
- err = v.translateError(err)
- if err != nil {
- // If we can't read recent/X, there's no point in
- // trying fixRace. Give up.
- return
+
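+	// Fan the trash listing out to BlobDeleteConcurrency worker
+	// goroutines.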
+ var wg sync.WaitGroup
+ todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
+ for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+			for trash := range todo {
+				emptyOneKey(trash)
+ }
+ }()
}
- if !v.fixRace(loc) {
- err = os.ErrNotExist
- return
+
+ trashL := s3awsLister{
+ Logger: v.logger,
+ Bucket: v.bucket,
+ Prefix: "trash/",
+ PageSize: v.IndexPageSize,
+ Stats: &v.bucket.stats,
}
- rdr, err = v.bucket.GetReader(loc)
- if err != nil {
- log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
- err = v.translateError(err)
+ for trash := trashL.First(); trash != nil; trash = trashL.Next() {
+ todo <- trash
}
- return
+ close(todo)
+ wg.Wait()
+
+ if err := trashL.Error(); err != nil {
+ v.logger.WithError(err).Error("EmptyTrash: lister failed")
+ }
+ v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
-// Get a block: copy the block data into buf, and return the number of
-// bytes copied.
-func (v *S3Volume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
- ready := make(chan bool)
- var rdr io.ReadCloser
- var err error
- go func() {
- rdr, err = v.getReader(loc)
- close(ready)
- }()
- select {
- case <-ctx.Done():
- theConfig.debugLogf("s3: abandoning getReader() because %s", ctx.Err())
- return 0, ctx.Err()
- case <-ready:
- if err != nil {
- return 0, err
+// fixRace(X) is called when "recent/X" exists but "X" doesn't
+// exist. If the timestamps on "recent/X" and "trash/X" indicate there
+// was a race between Put and Trash, fixRace recovers from the race by
+// Untrashing the block.
+func (v *s3Volume) fixRace(key string) bool {
+ trash, err := v.head("trash/" + key)
+ if err != nil {
+ if !os.IsNotExist(v.translateError(err)) {
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
}
+ return false
}
- var n int
- ready = make(chan bool)
- go func() {
- defer close(ready)
+ recent, err := v.head("recent/" + key)
+ if err != nil {
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
+ return false
+ }
- defer rdr.Close()
- n, err = io.ReadFull(rdr, buf)
+ recentTime := *recent.LastModified
+ trashTime := *trash.LastModified
+ ageWhenTrashed := trashTime.Sub(recentTime)
+ if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
+ // No evidence of a race: block hasn't been written
+ // since it became eligible for Trash. No fix needed.
+ return false
+ }
- switch err {
- case nil, io.EOF, io.ErrUnexpectedEOF:
- err = nil
- default:
- err = v.translateError(err)
- }
- }()
- select {
- case <-ctx.Done():
- theConfig.debugLogf("s3: interrupting ReadFull() with Close() because %s", ctx.Err())
- rdr.Close()
- // Must wait for ReadFull to return, to ensure it
- // doesn't write to buf after we return.
- theConfig.debugLogf("s3: waiting for ReadFull() to fail")
- <-ready
- return 0, ctx.Err()
- case <-ready:
- return n, err
- }
-}
-
-// Compare the given data with the stored data.
-func (v *S3Volume) Compare(loc string, expect []byte) error {
- rdr, err := v.getReader(loc)
+ v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
+ v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
+ err = v.safeCopy(key, "trash/"+key)
if err != nil {
- return err
+ v.logger.WithError(err).Error("fixRace: copy failed")
+ return false
}
- defer rdr.Close()
- return v.translateError(compareReaderWithBuf(rdr, expect, loc[:32]))
+ return true
}
-// Put writes a block.
-func (v *S3Volume) Put(loc string, block []byte) error {
- if v.ReadOnly {
- return MethodDisabledError
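+// head performs a HEAD request for the given key, counting the
+// operation in the bucket stats.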
+func (v *s3Volume) head(key string) (result *s3.HeadObjectOutput, err error) {
+ input := &s3.HeadObjectInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
}
- var opts s3.Options
- if len(block) > 0 {
- md5, err := hex.DecodeString(loc)
+
+ req := v.bucket.svc.HeadObjectRequest(input)
+ res, err := req.Send(context.TODO())
+
+ v.bucket.stats.TickOps("head")
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
+ v.bucket.stats.TickErr(err)
+
+ if err != nil {
+ return nil, v.translateError(err)
+ }
+ result = res.HeadObjectOutput
+ return
+}
+
+// BlockRead reads a Keep block that has been stored as a block blob
+// in the S3 bucket.
+func (v *s3Volume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+ key := v.key(hash)
+ err := v.readWorker(ctx, key, w)
+ if err != nil {
+ err = v.translateError(err)
+ if !os.IsNotExist(err) {
+ return err
+ }
+
+ _, err = v.head("recent/" + key)
+ err = v.translateError(err)
if err != nil {
+ // If we can't read recent/X, there's no point in
+ // trying fixRace. Give up.
+ return err
+ }
+		if !v.fixRace(key) {
+			return os.ErrNotExist
+		}
+
+ err = v.readWorker(ctx, key, w)
+ if err != nil {
+ v.logger.Warnf("reading %s after successful fixRace: %s", hash, err)
+ err = v.translateError(err)
return err
}
- opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5)
- }
- err := v.bucket.Put(loc, block, "application/octet-stream", s3ACL, opts)
- if err != nil {
- return v.translateError(err)
}
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+ return nil
+}
+
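+// readWorker downloads the object at key into dst using a concurrent
+// multi-part downloader, counting bytes and API calls in the bucket
+// stats.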
+func (v *s3Volume) readWorker(ctx context.Context, key string, dst io.WriterAt) error {
+ downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
+ u.PartSize = s3downloaderPartSize
+ u.Concurrency = s3downloaderReadConcurrency
+ })
+ count, err := downloader.DownloadWithContext(ctx, dst, &s3.GetObjectInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ })
+ v.bucket.stats.TickOps("get")
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
+ v.bucket.stats.TickErr(err)
+ v.bucket.stats.TickInBytes(uint64(count))
return v.translateError(err)
}
-// Touch sets the timestamp for the given locator to the current time.
-func (v *S3Volume) Touch(loc string) error {
- if v.ReadOnly {
- return MethodDisabledError
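+// writeObject uploads the content of r under the given key,
+// attaching a Content-MD5 header when the key names a Keep block.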
+func (v *s3Volume) writeObject(ctx context.Context, key string, r io.Reader) error {
+ if r == nil {
+ // r == nil leads to a memory violation in func readFillBuf in
+ // aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
+ r = bytes.NewReader(nil)
}
- _, err := v.bucket.Head(loc, nil)
- err = v.translateError(err)
- if os.IsNotExist(err) && v.fixRace(loc) {
- // The data object got trashed in a race, but fixRace
- // rescued it.
- } else if err != nil {
- return err
+
+ uploadInput := s3manager.UploadInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ Body: r,
}
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+
+ if loc, ok := v.isKeepBlock(key); ok {
+ var contentMD5 string
+ md5, err := hex.DecodeString(loc)
+ if err != nil {
+ return v.translateError(err)
+ }
+ contentMD5 = base64.StdEncoding.EncodeToString(md5)
+ uploadInput.ContentMD5 = &contentMD5
+ }
+
+ // Experimentation indicated that using concurrency 5 yields the best
+ // throughput, better than higher concurrency (10 or 13) by ~5%.
+ // Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
+ // is detrimental to throughput (minus ~15%).
+ uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
+ u.PartSize = s3uploaderPartSize
+ u.Concurrency = s3uploaderWriteConcurrency
+ })
+
+ // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
+ // the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
+ // block, so there is no extra memory use to be concerned about. See
+ // makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
+	// calculating the SHA-256 because we don't need it; we already use MD5
+	// hashes that match the name of the block.
+ _, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
+ r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
+ }))
+
+ v.bucket.stats.TickOps("put")
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
+ v.bucket.stats.TickErr(err)
+
return v.translateError(err)
}
-// Mtime returns the stored timestamp for the given locator.
-func (v *S3Volume) Mtime(loc string) (time.Time, error) {
- _, err := v.bucket.Head(loc, nil)
+// BlockWrite writes a block.
+func (v *s3Volume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+ // Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
+ // sdk to avoid memory allocation there. See #17339 for more information.
+ rdr := bytes.NewReader(data)
+ r := newCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
+ key := v.key(hash)
+ err := v.writeObject(ctx, key, r)
if err != nil {
- return zeroTime, v.translateError(err)
+ return err
}
- resp, err := v.bucket.Head("recent/"+loc, nil)
- err = v.translateError(err)
- if os.IsNotExist(err) {
- // The data object X exists, but recent/X is missing.
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
- if err != nil {
- log.Printf("error: creating %q: %s", "recent/"+loc, err)
- return zeroTime, v.translateError(err)
+ return v.writeObject(ctx, "recent/"+key, nil)
+}
+
+type s3awsLister struct {
+ Logger logrus.FieldLogger
+ Bucket *s3Bucket
+ Prefix string
+ PageSize int
+ Stats *s3awsbucketStats
+ ContinuationToken string
+ buf []s3.Object
+ err error
+}
+
+// First fetches the first page and returns the first item. It returns
+// nil if the response is the empty set or an error occurs.
+func (lister *s3awsLister) First() *s3.Object {
+ lister.getPage()
+ return lister.pop()
+}
+
+// Next returns the next item, fetching the next page if necessary. It
+// returns nil if the last available item has already been fetched, or
+// an error occurs.
+func (lister *s3awsLister) Next() *s3.Object {
+ if len(lister.buf) == 0 && lister.ContinuationToken != "" {
+ lister.getPage()
+ }
+ return lister.pop()
+}
+
+// Error returns the most recent error encountered by First or Next.
+func (lister *s3awsLister) Error() error {
+ return lister.err
+}
+
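+// getPage fetches one page of listing results into lister.buf,
+// saving the continuation token for the next page if the response
+// is truncated.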
+func (lister *s3awsLister) getPage() {
+ lister.Stats.TickOps("list")
+ lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
+
+ var input *s3.ListObjectsV2Input
+ if lister.ContinuationToken == "" {
+ input = &s3.ListObjectsV2Input{
+ Bucket: aws.String(lister.Bucket.bucket),
+ MaxKeys: aws.Int64(int64(lister.PageSize)),
+ Prefix: aws.String(lister.Prefix),
}
- log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
- resp, err = v.bucket.Head("recent/"+loc, nil)
- if err != nil {
- log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
- return zeroTime, v.translateError(err)
+ } else {
+ input = &s3.ListObjectsV2Input{
+ Bucket: aws.String(lister.Bucket.bucket),
+ MaxKeys: aws.Int64(int64(lister.PageSize)),
+ Prefix: aws.String(lister.Prefix),
+ ContinuationToken: &lister.ContinuationToken,
}
- } else if err != nil {
- // HEAD recent/X failed for some other reason.
- return zeroTime, err
}
- return v.lastModified(resp)
+
+ req := lister.Bucket.svc.ListObjectsV2Request(input)
+ resp, err := req.Send(context.Background())
+ if err != nil {
+		lister.err = err
+ return
+ }
+
+ if *resp.IsTruncated {
+ lister.ContinuationToken = *resp.NextContinuationToken
+ } else {
+ lister.ContinuationToken = ""
+ }
+ lister.buf = make([]s3.Object, 0, len(resp.Contents))
+ for _, key := range resp.Contents {
+ if !strings.HasPrefix(*key.Key, lister.Prefix) {
+ lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
+ continue
+ }
+ lister.buf = append(lister.buf, key)
+ }
+}
+
+func (lister *s3awsLister) pop() (k *s3.Object) {
+ if len(lister.buf) > 0 {
+ k = &lister.buf[0]
+ lister.buf = lister.buf[1:]
+ }
+ return
}
-// IndexTo writes a complete list of locators with the given prefix
+// Index writes a complete list of locators with the given prefix
-// for which Get() can retrieve data.
+// for which BlockRead() can retrieve data.
-func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
+func (v *s3Volume) Index(ctx context.Context, prefix string, writer io.Writer) error {
+ prefix = v.key(prefix)
// Use a merge sort to find matching sets of X and recent/X.
- dataL := s3Lister{
+ dataL := s3awsLister{
+ Logger: v.logger,
Bucket: v.bucket,
Prefix: prefix,
PageSize: v.IndexPageSize,
+ Stats: &v.bucket.stats,
}
- recentL := s3Lister{
+ recentL := s3awsLister{
+ Logger: v.logger,
Bucket: v.bucket,
Prefix: "recent/" + prefix,
PageSize: v.IndexPageSize,
+ Stats: &v.bucket.stats,
}
- for data, recent := dataL.First(), recentL.First(); data != nil; data = dataL.Next() {
- if data.Key >= "g" {
+ for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ if *data.Key >= "g" {
// Conveniently, "recent/*" and "trash/*" are
// lexically greater than all hex-encoded data
// hashes, so stopping here avoids iterating
// over all of them needlessly with dataL.
break
}
- if !v.isKeepBlock(data.Key) {
+ loc, isblk := v.isKeepBlock(*data.Key)
+ if !isblk {
continue
}
stamp := data
// Advance to the corresponding recent/X marker, if any
- for recent != nil {
- if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
+ for recent != nil && recentL.Error() == nil {
+ if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
recent = recentL.Next()
continue
			} else if cmp == 0 {
				stamp = recent
				recent = recentL.Next()
				break
			} else {
				// recent/X marker is missing: we'll
				// use the timestamp on the data
				// object.
				break
			}
}
- t, err := time.Parse(time.RFC3339, stamp.LastModified)
- if err != nil {
+ if err := recentL.Error(); err != nil {
return err
}
- fmt.Fprintf(writer, "%s+%d %d\n", data.Key, data.Size, t.UnixNano())
+ // We truncate sub-second precision here. Otherwise
+ // timestamps will never match the RFC1123-formatted
+ // Last-Modified values parsed by Mtime().
+ fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000)
}
- return nil
+ return dataL.Error()
}
-// Trash a Keep block.
-func (v *S3Volume) Trash(loc string) error {
- if v.ReadOnly {
- return MethodDisabledError
- }
- if t, err := v.Mtime(loc); err != nil {
- return err
- } else if time.Since(t) < theConfig.BlobSignatureTTL.Duration() {
- return nil
+// Mtime returns the stored timestamp for the given locator.
+func (v *s3Volume) Mtime(loc string) (time.Time, error) {
+ key := v.key(loc)
+ _, err := v.head(key)
+ if err != nil {
+ return s3AWSZeroTime, v.translateError(err)
}
- if theConfig.TrashLifetime == 0 {
- if !s3UnsafeDelete {
- return ErrS3TrashDisabled
+ resp, err := v.head("recent/" + key)
+ err = v.translateError(err)
+ if os.IsNotExist(err) {
+ // The data object X exists, but recent/X is missing.
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
+ if err != nil {
+ v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
+ return s3AWSZeroTime, v.translateError(err)
}
- return v.bucket.Del(loc)
- }
- err := v.checkRaceWindow(loc)
- if err != nil {
- return err
+ v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key)
+ resp, err = v.head("recent/" + key)
+ if err != nil {
+ v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
+ return s3AWSZeroTime, v.translateError(err)
+ }
+ } else if err != nil {
+ // HEAD recent/X failed for some other reason.
+ return s3AWSZeroTime, err
}
- err = v.safeCopy("trash/"+loc, loc)
- if err != nil {
+ return *resp.LastModified, err
+}
+
+// InternalStats returns bucket I/O and API call counters.
+func (v *s3Volume) InternalStats() interface{} {
+ return &v.bucket.stats
+}
+
+// BlockTouch sets the timestamp for the given locator to the current time.
+func (v *s3Volume) BlockTouch(hash string) error {
+ key := v.key(hash)
+ _, err := v.head(key)
+ err = v.translateError(err)
+ if os.IsNotExist(err) && v.fixRace(key) {
+ // The data object got trashed in a race, but fixRace
+ // rescued it.
+ } else if err != nil {
return err
}
- return v.translateError(v.bucket.Del(loc))
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
+ return v.translateError(err)
}
-// checkRaceWindow returns a non-nil error if trash/loc is, or might
-// be, in the race window (i.e., it's not safe to trash loc).
-func (v *S3Volume) checkRaceWindow(loc string) error {
- resp, err := v.bucket.Head("trash/"+loc, nil)
+// checkRaceWindow returns a non-nil error if trash/key is, or might
+// be, in the race window (i.e., it's not safe to trash key).
+func (v *s3Volume) checkRaceWindow(key string) error {
+ resp, err := v.head("trash/" + key)
err = v.translateError(err)
	if os.IsNotExist(err) {
		// OK, trash/X doesn't exist so we're not in the race
		// window
		return nil
	} else if err != nil {
		// Error looking up trash/X. We don't know whether
		// we're in the race window
		return err
	}
- t, err := v.lastModified(resp)
- if err != nil {
- // Can't parse timestamp
- return err
- }
- safeWindow := t.Add(theConfig.TrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
+ t := resp.LastModified
+ safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
if safeWindow <= 0 {
// We can't count on "touch trash/X" to prolong
// trash/X's lifetime. The new timestamp might not
// become visible until now+raceWindow, and EmptyTrash
// is allowed to delete trash/X before then.
- return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)
+ return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
}
// trash/X exists, but it won't be eligible for deletion until
// after now+raceWindow, so it's safe to overwrite it.
return nil
}
-// safeCopy calls PutCopy, and checks the response to make sure the
-// copy succeeded and updated the timestamp on the destination object
-// (PutCopy returns 200 OK if the request was received, even if the
-// copy failed).
-func (v *S3Volume) safeCopy(dst, src string) error {
- resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
- ContentType: "application/octet-stream",
- MetadataDirective: "REPLACE",
- }, v.bucket.Name+"/"+src)
- err = v.translateError(err)
- if err != nil {
- return err
- }
- if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
- return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
- } else if time.Now().Sub(t) > maxClockSkew {
- return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.LastModified, t)
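+// Del deletes the object at the given path, counting the operation
+// in the bucket stats.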
+func (b *s3Bucket) Del(path string) error {
+ input := &s3.DeleteObjectInput{
+ Bucket: aws.String(b.bucket),
+ Key: aws.String(path),
}
- return nil
-}
-
-// Get the LastModified header from resp, and parse it as RFC1123 or
-// -- if it isn't valid RFC1123 -- as Amazon's variant of RFC1123.
-func (v *S3Volume) lastModified(resp *http.Response) (t time.Time, err error) {
- s := resp.Header.Get("Last-Modified")
- t, err = time.Parse(time.RFC1123, s)
- if err != nil && s != "" {
- // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
- // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
- // as required by HTTP spec. If it's not a valid HTTP
- // header value, it's probably AWS (or s3test) giving
- // us a nearly-RFC1123 timestamp.
- t, err = time.Parse(nearlyRFC1123, s)
- }
- return
+ req := b.svc.DeleteObjectRequest(input)
+ _, err := req.Send(context.Background())
+ b.stats.TickOps("delete")
+ b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
+ b.stats.TickErr(err)
+ return err
}
-// Untrash moves block from trash back into store
-func (v *S3Volume) Untrash(loc string) error {
- err := v.safeCopy(loc, "trash/"+loc)
- if err != nil {
+// BlockTrash trashes a Keep block.
+func (v *s3Volume) BlockTrash(loc string) error {
+ if t, err := v.Mtime(loc); err != nil {
return err
+ } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
+ return nil
}
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
- return v.translateError(err)
-}
-
-// Status returns a *VolumeStatus representing the current in-use
-// storage capacity and a fake available capacity that doesn't make
-// the volume seem full or nearly-full.
-func (v *S3Volume) Status() *VolumeStatus {
- return &VolumeStatus{
- DeviceNum: 1,
- BytesFree: BlockSize * 1000,
- BytesUsed: 1,
- }
-}
-
-// String implements fmt.Stringer.
-func (v *S3Volume) String() string {
- return fmt.Sprintf("s3-bucket:%+q", v.bucket.Name)
-}
-
-// Writable returns false if all future Put, Mtime, and Delete calls
-// are expected to fail.
-func (v *S3Volume) Writable() bool {
- return !v.ReadOnly
-}
-
-// Replication returns the storage redundancy of the underlying
-// device. Configured via command line flag.
-func (v *S3Volume) Replication() int {
- return v.S3Replication
-}
-
-var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
-
-func (v *S3Volume) isKeepBlock(s string) bool {
- return s3KeepBlockRegexp.MatchString(s)
-}
-
-// fixRace(X) is called when "recent/X" exists but "X" doesn't
-// exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
-// there was a race between Put and Trash, fixRace recovers from the
-// race by Untrashing the block.
-func (v *S3Volume) fixRace(loc string) bool {
- trash, err := v.bucket.Head("trash/"+loc, nil)
- if err != nil {
- if !os.IsNotExist(v.translateError(err)) {
- log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
+ key := v.key(loc)
+ if v.cluster.Collections.BlobTrashLifetime == 0 {
+ if !v.UnsafeDelete {
+ return errS3TrashDisabled
}
- return false
- }
- trashTime, err := v.lastModified(trash)
- if err != nil {
- log.Printf("error: fixRace: parse %q: %s", trash.Header.Get("Last-Modified"), err)
- return false
- }
-
- recent, err := v.bucket.Head("recent/"+loc, nil)
- if err != nil {
- log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
- return false
+ return v.translateError(v.bucket.Del(key))
}
- recentTime, err := v.lastModified(recent)
+ err := v.checkRaceWindow(key)
if err != nil {
- log.Printf("error: fixRace: parse %q: %s", recent.Header.Get("Last-Modified"), err)
- return false
- }
-
- ageWhenTrashed := trashTime.Sub(recentTime)
- if ageWhenTrashed >= theConfig.BlobSignatureTTL.Duration() {
- // No evidence of a race: block hasn't been written
- // since it became eligible for Trash. No fix needed.
- return false
+ return err
}
-
- log.Printf("notice: fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, theConfig.BlobSignatureTTL)
- log.Printf("notice: fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
- err = v.safeCopy(loc, "trash/"+loc)
+ err = v.safeCopy("trash/"+key, key)
if err != nil {
- log.Printf("error: fixRace: %s", err)
- return false
- }
- return true
-}
-
-func (v *S3Volume) translateError(err error) error {
- switch err := err.(type) {
- case *s3.Error:
- if (err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey") ||
- strings.Contains(err.Error(), "Not Found") {
- return os.ErrNotExist
- }
- // Other 404 errors like NoSuchVersion and
- // NoSuchBucket are different problems which should
- // get called out downstream, so we don't convert them
- // to os.ErrNotExist.
- }
- return err
-}
-
-// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
-// and deletes them from the volume.
-func (v *S3Volume) EmptyTrash() {
- var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
-
- // Use a merge sort to find matching sets of trash/X and recent/X.
- trashL := s3Lister{
- Bucket: v.bucket,
- Prefix: "trash/",
- PageSize: v.IndexPageSize,
- }
- // Define "ready to delete" as "...when EmptyTrash started".
- startT := time.Now()
- for trash := trashL.First(); trash != nil; trash = trashL.Next() {
- loc := trash.Key[6:]
- if !v.isKeepBlock(loc) {
- continue
- }
- bytesInTrash += trash.Size
- blocksInTrash++
-
- trashT, err := time.Parse(time.RFC3339, trash.LastModified)
- if err != nil {
- log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
- continue
- }
- recent, err := v.bucket.Head("recent/"+loc, nil)
- if err != nil && os.IsNotExist(v.translateError(err)) {
- log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
- err = v.Untrash(loc)
- if err != nil {
- log.Printf("error: %s: EmptyTrash: Untrash(%q): %s", v, loc, err)
- }
- continue
- } else if err != nil {
- log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
- continue
- }
- recentT, err := v.lastModified(recent)
- if err != nil {
- log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, "recent/"+loc, recent.Header.Get("Last-Modified"), err)
- continue
- }
- if trashT.Sub(recentT) < theConfig.BlobSignatureTTL.Duration() {
- if age := startT.Sub(recentT); age >= theConfig.BlobSignatureTTL.Duration()-time.Duration(v.RaceWindow) {
- // recent/loc is too old to protect
- // loc from being Trashed again during
- // the raceWindow that starts if we
- // delete trash/X now.
- //
- // Note this means (TrashCheckInterval
- // < BlobSignatureTTL - raceWindow) is
- // necessary to avoid starvation.
- log.Printf("notice: %s: EmptyTrash: detected old race for %q, calling fixRace + Touch", v, loc)
- v.fixRace(loc)
- v.Touch(loc)
- continue
- } else if _, err := v.bucket.Head(loc, nil); os.IsNotExist(err) {
- log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
- v.fixRace(loc)
- continue
- } else if err != nil {
- log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
- continue
- }
- }
- if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
- continue
- }
- err = v.bucket.Del(trash.Key)
- if err != nil {
- log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
- continue
- }
- bytesDeleted += trash.Size
- blocksDeleted++
-
- _, err = v.bucket.Head(loc, nil)
- if os.IsNotExist(err) {
- err = v.bucket.Del("recent/" + loc)
- if err != nil {
- log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
- }
- } else if err != nil {
- log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
- }
- }
- if err := trashL.Error(); err != nil {
- log.Printf("error: %s: EmptyTrash: lister: %s", v, err)
+ return err
}
- log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+ return v.translateError(v.bucket.Del(key))
}
-type s3Lister struct {
- Bucket *s3.Bucket
- Prefix string
- PageSize int
- nextMarker string
- buf []s3.Key
- err error
-}
-
-// First fetches the first page and returns the first item. It returns
-// nil if the response is the empty set or an error occurs.
-func (lister *s3Lister) First() *s3.Key {
- lister.getPage()
- return lister.pop()
-}
-
-// Next returns the next item, fetching the next page if necessary. It
-// returns nil if the last available item has already been fetched, or
-// an error occurs.
-func (lister *s3Lister) Next() *s3.Key {
- if len(lister.buf) == 0 && lister.nextMarker != "" {
- lister.getPage()
+// BlockUntrash moves block from trash back into store
+func (v *s3Volume) BlockUntrash(hash string) error {
+ key := v.key(hash)
+ err := v.safeCopy(key, "trash/"+key)
+ if err != nil {
+ return err
}
- return lister.pop()
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
+ return v.translateError(err)
}
-// Return the most recent error encountered by First or Next.
-func (lister *s3Lister) Error() error {
- return lister.err
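+// s3awsbucketStats counts API calls, I/O bytes, and errors for an
+// s3Bucket, broken down by operation type.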
+type s3awsbucketStats struct {
+ statsTicker
+ Ops uint64
+ GetOps uint64
+ PutOps uint64
+ HeadOps uint64
+ DelOps uint64
+ ListOps uint64
}
-func (lister *s3Lister) getPage() {
- resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize)
- lister.nextMarker = ""
- if err != nil {
- lister.err = err
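+// TickErr classifies err by Go type (plus HTTP status and AWS error
+// code, when available) and counts it in the error stats.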
+func (s *s3awsbucketStats) TickErr(err error) {
+ if err == nil {
return
}
- if resp.IsTruncated {
- lister.nextMarker = resp.NextMarker
- }
- lister.buf = make([]s3.Key, 0, len(resp.Contents))
- for _, key := range resp.Contents {
- if !strings.HasPrefix(key.Key, lister.Prefix) {
- log.Printf("warning: s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
- continue
+ errType := fmt.Sprintf("%T", err)
+ if aerr, ok := err.(awserr.Error); ok {
+ if reqErr, ok := err.(awserr.RequestFailure); ok {
+ // A service error occurred
+ errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
+ } else {
+ errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
}
- lister.buf = append(lister.buf, key)
- }
-}
-
-func (lister *s3Lister) pop() (k *s3.Key) {
- if len(lister.buf) > 0 {
- k = &lister.buf[0]
- lister.buf = lister.buf[1:]
}
- return
+ s.statsTicker.TickErr(err, errType)
}