// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
-package main
+package keepstore
import (
"bytes"
"github.com/sirupsen/logrus"
)
-// S3Volume implements Volume using an S3 bucket.
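+// init registers newS3AWSVolume in the keepstore driver map, so
+// volumes configured with Driver: S3 use this aws-sdk-go-v2 based
+// implementation.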
+func init() {
+ driver["S3"] = newS3AWSVolume
+}
+
+const (
+ s3DefaultReadTimeout = arvados.Duration(10 * time.Minute)
+ s3DefaultConnectTimeout = arvados.Duration(time.Minute)
+ maxClockSkew = 600 * time.Second
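+	// nearlyRFC1123 is RFC 1123 except the day of month is not
+	// zero-padded, matching the Last-Modified timestamps S3
+	// services return.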
+ nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
+)
+
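+// ErrS3TrashDisabled is returned by Trash when trashing is disabled
+// (Collections.BlobTrashLifetime=0) and DriverParameters.UnsafeDelete
+// is not enabled.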
+var (
+ ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
+)
+
+// S3AWSVolume implements Volume using an S3 bucket.
type S3AWSVolume struct {
arvados.S3VolumeDriverParameters
AuthToken string // populated automatically when IAMRole is used
mu sync.Mutex
}
-// chooseS3VolumeDriver distinguishes between the old goamz driver and
-// aws-sdk-go based on the AlternateDriver feature flag
-func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, &v)
- if err != nil {
- return nil, err
- }
- if v.AlternateDriver {
- logger.Debugln("Using alternate S3 driver (aws-go)")
- return newS3AWSVolume(cluster, volume, logger, metrics)
- } else {
- logger.Debugln("Using standard S3 driver (goamz)")
- return newS3Volume(cluster, volume, logger, metrics)
- }
-}
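+
+// Tuning values for the aws-sdk-go-v2 transfer managers. PartSize is
+// the 5 MiB S3 multipart minimum; see the benchmarking notes in
+// writeObject for how the concurrency values were chosen.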
+const (
+ PartSize = 5 * 1024 * 1024
+ ReadConcurrency = 13
+ WriteConcurrency = 5
+)
var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
var s3AWSZeroTime time.Time
-func (v *S3AWSVolume) isKeepBlock(s string) bool {
- return s3AWSKeepBlockRegexp.MatchString(s)
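+// isKeepBlock strips any PrefixLength key prefix (see key() below)
+// from s, and reports whether the result looks like a keep block
+// locator.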
+func (v *S3AWSVolume) isKeepBlock(s string) (string, bool) {
+ if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
+ s = s[v.PrefixLength+1:]
+ }
+ return s, s3AWSKeepBlockRegexp.MatchString(s)
+}
+
+// key returns the object key used for a given loc: if PrefixLength==0,
+// key("abcdef0123") is "abcdef0123"; if PrefixLength==3, it is
+// "abc/abcdef0123", etc.
+func (v *S3AWSVolume) key(loc string) string {
+ if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
+ return loc[:v.PrefixLength] + "/" + loc
+ } else {
+ return loc
+ }
}
func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- logger.Debugln("in newS3AWSVolume")
v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, &v)
+ err := json.Unmarshal(volume.DriverParameters, v)
if err != nil {
return nil, err
}
v.logger = logger.WithField("Volume", v.String())
- v.logger.Debugln("in newS3AWSVolume after volume set")
return v, v.check("")
}
func (v *S3AWSVolume) translateError(err error) error {
- if aerr, ok := err.(awserr.Error); ok {
- switch aerr.Code() {
- case "NotFound":
+ if _, ok := err.(*aws.RequestCanceledError); ok {
+ return context.Canceled
+ } else if aerr, ok := err.(awserr.Error); ok {
+ if aerr.Code() == "NotFound" {
return os.ErrNotExist
- case "NoSuchKey":
+ } else if aerr.Code() == "NoSuchKey" {
return os.ErrNotExist
}
}
return err
}
-// safeCopy calls CopyObjectRequest, and checks the response to make sure the
-// copy succeeded and updated the timestamp on the destination object
+// safeCopy calls CopyObjectRequest, and checks the response to make
+// sure the copy succeeded and updated the timestamp on the
+// destination object
//
-// (If something goes wrong during the copy, the error will be embedded in the
-// 200 OK response)
+// (If something goes wrong during the copy, the error will be
+// embedded in the 200 OK response)
func (v *S3AWSVolume) safeCopy(dst, src string) error {
input := &s3.CopyObjectInput{
Bucket: aws.String(v.bucket.bucket),
return errors.New("DriverParameters: RaceWindow must not be negative")
}
+ if v.V2Signature {
+ return errors.New("DriverParameters: V2Signature is not supported")
+ }
+
defaultResolver := endpoints.NewDefaultResolver()
cfg := defaults.Config()
if v.Endpoint != "" && service == "s3" {
return aws.Endpoint{
URL: v.Endpoint,
- SigningRegion: v.Region,
+ SigningRegion: region,
}, nil
} else if service == "ec2metadata" && ec2metadataHostname != "" {
return aws.Endpoint{
URL: ec2metadataHostname,
}, nil
+ } else {
+ return defaultResolver.ResolveEndpoint(service, region)
}
-
- return defaultResolver.ResolveEndpoint(service, region)
}
cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
}
-
+ if v.Region == "" {
+		// Endpoint is already specified (otherwise we would
+		// have errored out above), but the aws sdk also
+		// requires Region in order to determine the signature
+		// version.
+ v.Region = "us-east-1"
+ }
cfg.Region = v.Region
// Zero timeouts mean "wait forever", which is a bad
creds := aws.NewChainProvider(
[]aws.CredentialsProvider{
- aws.NewStaticCredentialsProvider(v.AccessKey, v.SecretKey, v.AuthToken),
+ aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken),
ec2rolecreds.New(ec2metadata.New(cfg)),
})
// Compare the given data with the stored data.
func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) error {
+ key := v.key(loc)
errChan := make(chan error, 1)
go func() {
- _, err := v.Head("recent/" + loc)
+ _, err := v.head("recent/" + key)
errChan <- err
}()
var err error
case err = <-errChan:
}
if err != nil {
- // Checking for "loc" itself here would interfere with
- // future GET requests.
+ // Checking for the key itself here would interfere
+ // with future GET requests.
//
// On AWS, if X doesn't exist, a HEAD or GET request
// for X causes X's non-existence to be cached. Thus,
input := &s3.GetObjectInput{
Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(loc),
+ Key: aws.String(key),
}
req := v.bucket.svc.GetObjectRequest(input)
startT := time.Now()
emptyOneKey := func(trash *s3.Object) {
- v.logger.Warnf("EmptyTrash: looking for trash marker %s with last modified date %s", *trash.Key, *trash.LastModified)
- loc := strings.TrimPrefix(*trash.Key, "trash/")
- if !v.isKeepBlock(loc) {
+ key := strings.TrimPrefix(*trash.Key, "trash/")
+ loc, isblk := v.isKeepBlock(key)
+ if !isblk {
return
}
atomic.AddInt64(&bytesInTrash, *trash.Size)
atomic.AddInt64(&blocksInTrash, 1)
- trashT := *(trash.LastModified)
- v.logger.Infof("HEEEEEEE trashT key: %s, type: %T val: %s, startT is %s", *trash.Key, trashT, trashT, startT)
- recent, err := v.Head("recent/" + loc)
+ trashT := *trash.LastModified
+ recent, err := v.head("recent/" + key)
if err != nil && os.IsNotExist(v.translateError(err)) {
- v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", trash.Key, "recent/"+loc, err)
+ v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
err = v.Untrash(loc)
if err != nil {
v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
}
return
} else if err != nil {
- v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+loc)
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
return
}
- v.logger.Infof("recent.LastModified type: %T val: %s", recent.LastModified, recent.LastModified)
if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
- v.logger.Infof("HERE! recent.lastmodified is smaller than blobsigningttl")
if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
- // recent/loc is too old to protect
+ // recent/key is too old to protect
// loc from being Trashed again during
// the raceWindow that starts if we
// delete trash/X now.
// < BlobSigningTTL - raceWindow) is
// necessary to avoid starvation.
v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
- v.fixRace(loc)
+ v.fixRace(key)
v.Touch(loc)
return
}
- _, err := v.Head(loc)
+ _, err := v.head(key)
if os.IsNotExist(err) {
v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
- v.fixRace(loc)
+ v.fixRace(key)
return
} else if err != nil {
v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
}
}
if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
- v.logger.Infof("HERE! trashT for %s is smaller than blobtrashlifetime: %s < %s", *trash.Key, startT.Sub(trashT), v.cluster.Collections.BlobTrashLifetime.Duration())
return
}
err = v.bucket.Del(*trash.Key)
atomic.AddInt64(&bytesDeleted, *trash.Size)
atomic.AddInt64(&blocksDeleted, 1)
- v.logger.Infof("HERE! trash.Key %s should have been deleted", *trash.Key)
- _, err = v.Head(loc)
+ _, err = v.head(*trash.Key)
if err == nil {
v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
return
}
if !os.IsNotExist(v.translateError(err)) {
- v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", key)
return
}
- err = v.bucket.Del("recent/" + loc)
+ err = v.bucket.Del("recent/" + key)
if err != nil {
- v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+loc)
+ v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+key)
}
- v.logger.Infof("HERE! recent/%s should have been deleted", loc)
}
var wg sync.WaitGroup
if err := trashL.Error(); err != nil {
v.logger.WithError(err).Error("EmptyTrash: lister failed")
}
- v.logger.Infof("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+ v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
// fixRace(X) is called when "recent/X" exists but "X" doesn't
-// exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
-// there was a race between Put and Trash, fixRace recovers from the
-// race by Untrashing the block.
-func (v *S3AWSVolume) fixRace(loc string) bool {
- trash, err := v.Head("trash/" + loc)
+// exist. If the timestamps on "recent/X" and "trash/X" indicate there
+// was a race between Put and Trash, fixRace recovers from the race by
+// Untrashing the block.
+func (v *S3AWSVolume) fixRace(key string) bool {
+ trash, err := v.head("trash/" + key)
if err != nil {
if !os.IsNotExist(v.translateError(err)) {
- v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+loc)
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
}
return false
}
- recent, err := v.Head("recent/" + loc)
+ recent, err := v.head("recent/" + key)
if err != nil {
- v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+loc)
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
return false
}
return false
}
- v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
- v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
- err = v.safeCopy(loc, "trash/"+loc)
+ v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
+ v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
+ err = v.safeCopy(key, "trash/"+key)
if err != nil {
v.logger.WithError(err).Error("fixRace: copy failed")
return false
return true
}
-func (v *S3AWSVolume) Head(loc string) (result *s3.HeadObjectOutput, err error) {
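+// head issues a HEAD request for the given object key and returns the
+// object metadata.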
+func (v *S3AWSVolume) head(key string) (result *s3.HeadObjectOutput, err error) {
input := &s3.HeadObjectInput{
Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(loc),
+ Key: aws.String(key),
}
req := v.bucket.svc.HeadObjectRequest(input)
// Get a block: copy the block data into buf, and return the number of
// bytes copied.
func (v *S3AWSVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
- return getWithPipe(ctx, loc, buf, v)
-}
-
-func (v *S3AWSVolume) readWorker(ctx context.Context, loc string) (rdr io.ReadCloser, err error) {
- buf := make([]byte, 0, 67108864)
- awsBuf := aws.NewWriteAtBuffer(buf)
-
- downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
- u.PartSize = 5 * 1024 * 1024
- u.Concurrency = 13
- })
-
- v.logger.Debugf("Partsize: %d; Concurrency: %d\n", downloader.PartSize, downloader.Concurrency)
-
- _, err = downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(loc),
- })
- v.bucket.stats.TickOps("get")
- v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
- v.bucket.stats.TickErr(err)
- if err != nil {
- return nil, v.translateError(err)
- }
- buf = awsBuf.Bytes()
-
- rdr = NewCountingReader(bytes.NewReader(buf), v.bucket.stats.TickInBytes)
- return
-}
-
-// ReadBlock implements BlockReader.
-func (v *S3AWSVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) error {
- rdr, err := v.readWorker(ctx, loc)
-
+	// Do not use getWithPipe here: the BlockReader interface does not pass
+	// through 'buf []byte', and we don't want to allocate two buffers for
+	// each read request. Instead, readWorker accepts 'buf []byte' as input
+	// and downloads directly into it.
+ key := v.key(loc)
+ count, err := v.readWorker(ctx, key, buf)
if err == nil {
- _, err2 := io.Copy(w, rdr)
- if err2 != nil {
- return err2
- }
- return err
+ return count, err
}
err = v.translateError(err)
if !os.IsNotExist(err) {
- return err
+ return 0, err
}
- _, err = v.Head("recent/" + loc)
+ _, err = v.head("recent/" + key)
err = v.translateError(err)
if err != nil {
// If we can't read recent/X, there's no point in
// trying fixRace. Give up.
- return err
+ return 0, err
}
- if !v.fixRace(loc) {
+ if !v.fixRace(key) {
err = os.ErrNotExist
- return err
+ return 0, err
}
- rdr, err = v.readWorker(ctx, loc)
+ count, err = v.readWorker(ctx, key, buf)
if err != nil {
v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
err = v.translateError(err)
- return err
+ return 0, err
}
-
- _, err = io.Copy(w, rdr)
-
- return err
+ return count, err
}
-func (b *s3AWSbucket) PutReader(path string, r io.Reader, length int64, contType string, contentMD5 string, contentSHA256 string) error {
- if length == 0 {
- // aws-sdk-go will only send Content-Length: 0 when reader
- // is nil due to net.http.Request.ContentLength
- // behavior. Otherwise, Content-Length header is
- // omitted which will cause some S3 services
- // (including AWS and Ceph RadosGW) to fail to create
- // empty objects.
- r = bytes.NewReader([]byte{})
- } else {
- r = NewCountingReader(r, b.stats.TickOutBytes)
- }
- uploader := s3manager.NewUploaderWithClient(b.svc)
- _, err := uploader.Upload(&s3manager.UploadInput{
- Bucket: aws.String(b.bucket),
- Key: aws.String(path),
- Body: r,
+func (v *S3AWSVolume) readWorker(ctx context.Context, key string, buf []byte) (int, error) {
+ awsBuf := aws.NewWriteAtBuffer(buf)
+ downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
+ u.PartSize = PartSize
+ u.Concurrency = ReadConcurrency
})
- b.stats.TickOps("put")
- b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
- b.stats.TickErr(err)
- return err
-}
	v.logger.Debugf("PartSize: %d; Concurrency: %d", downloader.PartSize, downloader.Concurrency)
-// Put writes a block.
-func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
- return putWithPipe(ctx, loc, block, v)
+ count, err := downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
+ Bucket: aws.String(v.bucket.bucket),
+ Key: aws.String(key),
+ })
+ v.bucket.stats.TickOps("get")
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
+ v.bucket.stats.TickErr(err)
+ v.bucket.stats.TickInBytes(uint64(count))
+ return int(count), v.translateError(err)
}
-// WriteBlock implements BlockWriter.
-func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
+func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader) error {
+ if r == nil {
+ // r == nil leads to a memory violation in func readFillBuf in
+ // aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
+ r = bytes.NewReader(nil)
}
- r := NewCountingReader(rdr, v.bucket.stats.TickOutBytes)
uploadInput := s3manager.UploadInput{
Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(loc),
+ Key: aws.String(key),
Body: r,
}
- //var contentMD5, contentSHA256 string
- var contentMD5 string
- md5, err := hex.DecodeString(loc)
- if err != nil {
- return err
- }
- contentMD5 = base64.StdEncoding.EncodeToString(md5)
- // See if this is the empty block
- if contentMD5 != "d41d8cd98f00b204e9800998ecf8427e" {
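+	// A keep block locator is the hex-encoded MD5 of the block data,
+	// so we can re-encode it as base64 and pass it as ContentMD5 (the
+	// Content-MD5 header), letting the S3 service verify the upload.
+	// For example, the empty block "d41d8cd98f00b204e9800998ecf8427e"
+	// becomes "1B2M2Y8AsgTpgAmY7PhCfg==".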
+ if loc, ok := v.isKeepBlock(key); ok {
+ var contentMD5 string
+ md5, err := hex.DecodeString(loc)
+ if err != nil {
+ return v.translateError(err)
+ }
+ contentMD5 = base64.StdEncoding.EncodeToString(md5)
uploadInput.ContentMD5 = &contentMD5
- // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
- // the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
- // block, so there is no extra memory use to be concerned about. See
- // makeSha256Reader in aws/signer/v4/v4.go.
}
- // Some experimentation indicated that using concurrency 5 yields the best
+ // Experimentation indicated that using concurrency 5 yields the best
// throughput, better than higher concurrency (10 or 13) by ~5%.
// Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
	// is detrimental to throughput (minus ~15%).
uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
- u.PartSize = 5 * 1024 * 1024
- u.Concurrency = 5
+ u.PartSize = PartSize
+ u.Concurrency = WriteConcurrency
})
- _, err = uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions())
+	// Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
+	// the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
+	// block, so there is no extra memory use to be concerned about. See
+	// makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly
+	// disable SHA-256 calculation because we don't need it: blocks are
+	// already verified by the MD5 hash in their locator.
+ _, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
+ r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
+ }))
v.bucket.stats.TickOps("put")
v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
v.bucket.stats.TickErr(err)
+
+ return v.translateError(err)
+}
+
+// Put writes a block.
+func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
+ // Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
+ // sdk to avoid memory allocation there. See #17339 for more information.
+ if v.volume.ReadOnly {
+ return MethodDisabledError
+ }
+
+ rdr := bytes.NewReader(block)
+ r := NewCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
+ key := v.key(loc)
+ err := v.writeObject(ctx, key, r)
if err != nil {
return err
}
-
- empty := bytes.NewReader([]byte{})
- _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String("recent/" + loc),
- Body: empty,
- })
- v.bucket.stats.TickOps("put")
- v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
- v.bucket.stats.TickErr(err)
-
- return err
+ return v.writeObject(ctx, "recent/"+key, nil)
}
type s3awsLister struct {
Logger logrus.FieldLogger
- Bucket *s3AWSbucket //*s3.Bucket
+ Bucket *s3AWSbucket
Prefix string
PageSize int
Stats *s3awsbucketStats
// IndexTo writes a complete list of locators with the given prefix
// for which Get() can retrieve data.
func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
+ prefix = v.key(prefix)
// Use a merge sort to find matching sets of X and recent/X.
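	// (S3 returns listings in lexicographic order, so dataL and
	// recentL yield sorted streams that can be merged in a single
	// pass.)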
dataL := s3awsLister{
Logger: v.logger,
// over all of them needlessly with dataL.
break
}
- if !v.isKeepBlock(*data.Key) {
+ loc, isblk := v.isKeepBlock(*data.Key)
+ if !isblk {
continue
}
if err := recentL.Error(); err != nil {
return err
}
- fmt.Fprintf(writer, "%s+%d %d\n", *data.Key, *data.Size, stamp.LastModified.UnixNano())
+ // We truncate sub-second precision here. Otherwise
+ // timestamps will never match the RFC1123-formatted
+ // Last-Modified values parsed by Mtime().
+ fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000)
}
return dataL.Error()
}
// Mtime returns the stored timestamp for the given locator.
func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
- _, err := v.Head(loc)
+ key := v.key(loc)
+ _, err := v.head(key)
if err != nil {
return s3AWSZeroTime, v.translateError(err)
}
- resp, err := v.Head("recent/" + loc)
+ resp, err := v.head("recent/" + key)
err = v.translateError(err)
if os.IsNotExist(err) {
// The data object X exists, but recent/X is missing.
- err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
if err != nil {
- v.logger.WithError(err).Errorf("error creating %q", "recent/"+loc)
+ v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
return s3AWSZeroTime, v.translateError(err)
}
- v.logger.Infof("created %q to migrate existing block to new storage scheme", "recent/"+loc)
- resp, err = v.Head("recent/" + loc)
+ v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key)
+ resp, err = v.head("recent/" + key)
if err != nil {
- v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+loc)
+ v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
return s3AWSZeroTime, v.translateError(err)
}
} else if err != nil {
if v.volume.ReadOnly {
return MethodDisabledError
}
- _, err := v.Head(loc)
+ key := v.key(loc)
+ _, err := v.head(key)
err = v.translateError(err)
- if os.IsNotExist(err) && v.fixRace(loc) {
+ if os.IsNotExist(err) && v.fixRace(key) {
// The data object got trashed in a race, but fixRace
// rescued it.
} else if err != nil {
return err
}
- err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
return v.translateError(err)
}
-// checkRaceWindow returns a non-nil error if trash/loc is, or might
-// be, in the race window (i.e., it's not safe to trash loc).
-func (v *S3AWSVolume) checkRaceWindow(loc string) error {
- resp, err := v.Head("trash/" + loc)
+// checkRaceWindow returns a non-nil error if trash/key is, or might
+// be, in the race window (i.e., it's not safe to trash key).
+func (v *S3AWSVolume) checkRaceWindow(key string) error {
+ resp, err := v.head("trash/" + key)
err = v.translateError(err)
if os.IsNotExist(err) {
// OK, trash/X doesn't exist so we're not in the race
// trash/X's lifetime. The new timestamp might not
// become visible until now+raceWindow, and EmptyTrash
// is allowed to delete trash/X before then.
- return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)
+ return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
}
// trash/X exists, but it won't be eligible for deletion until
// after now+raceWindow, so it's safe to overwrite it.
}
req := b.svc.DeleteObjectRequest(input)
_, err := req.Send(context.Background())
- //err := b.Bucket().Del(path)
b.stats.TickOps("delete")
b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
b.stats.TickErr(err)
} else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
return nil
}
+ key := v.key(loc)
if v.cluster.Collections.BlobTrashLifetime == 0 {
if !v.UnsafeDelete {
return ErrS3TrashDisabled
}
- return v.translateError(v.bucket.Del(loc))
+ return v.translateError(v.bucket.Del(key))
}
- err := v.checkRaceWindow(loc)
+ err := v.checkRaceWindow(key)
if err != nil {
return err
}
- err = v.safeCopy("trash/"+loc, loc)
+ err = v.safeCopy("trash/"+key, key)
if err != nil {
return err
}
- return v.translateError(v.bucket.Del(loc))
+ return v.translateError(v.bucket.Del(key))
}
// Untrash moves a block from trash back into the store.
func (v *S3AWSVolume) Untrash(loc string) error {
- err := v.safeCopy(loc, "trash/"+loc)
+ key := v.key(loc)
+ err := v.safeCopy(key, "trash/"+key)
if err != nil {
return err
}
- err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
+ err = v.writeObject(context.Background(), "recent/"+key, nil)
return v.translateError(err)
}