// Copyright (C) The Arvados Authors. All rights reserved.
// SPDX-License-Identifier: AGPL-3.0

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/defaults"
	"github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
	"github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
	"github.com/aws/aws-sdk-go-v2/aws/endpoints"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
// S3AWSVolume implements Volume using an S3 bucket.
type S3AWSVolume struct {
	arvados.S3VolumeDriverParameters
	AuthToken      string    // populated automatically when IAMRole is used
	AuthExpiration time.Time // populated automatically when IAMRole is used

	cluster *arvados.Cluster
	logger  logrus.FieldLogger
	metrics *volumeMetricsVecs

// s3AWSbucket wraps an S3 bucket and counts I/O and API usage stats.
// The wrapped bucket can be replaced atomically with SetBucket in
// order to update credentials.
type s3AWSbucket struct {
	stats s3awsbucketStats
// chooseS3VolumeDriver distinguishes between the old goamz driver and
// aws-sdk-go based on the UseAWSS3v2Driver feature flag.
func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
	v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
	err := json.Unmarshal(volume.DriverParameters, v)
	if v.UseAWSS3v2Driver {
		logger.Debugln("Using AWS S3 v2 driver")
		return newS3AWSVolume(cluster, volume, logger, metrics)
	logger.Debugln("Using goamz S3 driver")
	return newS3Volume(cluster, volume, logger, metrics)
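
// Illustrative example (not part of this file): the flag checked above
// comes from the volume's DriverParameters JSON, so a keepstore volume
// configured with DriverParameters along the lines of
//
//	{"Bucket": "example-bucket", "Region": "us-east-1", "UseAWSS3v2Driver": true}
//
// (bucket name and region are placeholders) would select the aws-sdk-go-v2
// driver below, while omitting the flag keeps the goamz driver.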
	PartSize = 5 * 1024 * 1024

var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
var s3AWSZeroTime time.Time

func (v *S3AWSVolume) isKeepBlock(s string) bool {
	return s3AWSKeepBlockRegexp.MatchString(s)

func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
	v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
	err := json.Unmarshal(volume.DriverParameters, v)
	v.logger = logger.WithField("Volume", v.String())
func (v *S3AWSVolume) translateError(err error) error {
	if aerr, ok := err.(awserr.Error); ok {
			return os.ErrNotExist
			return os.ErrNotExist
// safeCopy calls CopyObjectRequest, and checks the response to make
// sure the copy succeeded and updated the timestamp on the destination
// object.
//
// (If something goes wrong during the copy, the error will be embedded
// in the 200 OK response.)
func (v *S3AWSVolume) safeCopy(dst, src string) error {
	input := &s3.CopyObjectInput{
		Bucket:      aws.String(v.bucket.bucket),
		ContentType: aws.String("application/octet-stream"),
		CopySource:  aws.String(v.bucket.bucket + "/" + src),
		Key:         aws.String(dst),

	req := v.bucket.svc.CopyObjectRequest(input)
	resp, err := req.Send(context.Background())

	err = v.translateError(err)
	if os.IsNotExist(err) {
	} else if err != nil {
		return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err)

	if resp.CopyObjectResult.LastModified == nil {
		return fmt.Errorf("PutCopy(%q ← %q) succeeded but did not return a timestamp", dst, v.bucket.bucket+"/"+src)
	} else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew {
		return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", dst, resp.CopyObjectResult.LastModified)
func (v *S3AWSVolume) check(ec2metadataHostname string) error {
		return errors.New("DriverParameters: Bucket must be provided")

	if v.IndexPageSize == 0 {
		v.IndexPageSize = 1000

	if v.RaceWindow < 0 {
		return errors.New("DriverParameters: RaceWindow must not be negative")

		return errors.New("DriverParameters: V2Signature is not supported")

	defaultResolver := endpoints.NewDefaultResolver()

	cfg := defaults.Config()

	if v.Endpoint == "" && v.Region == "" {
		return fmt.Errorf("AWS region or endpoint must be specified")
	} else if v.Endpoint != "" || ec2metadataHostname != "" {
		myCustomResolver := func(service, region string) (aws.Endpoint, error) {
			if v.Endpoint != "" && service == "s3" {
					SigningRegion: v.Region,
			} else if service == "ec2metadata" && ec2metadataHostname != "" {
					URL: ec2metadataHostname,

			return defaultResolver.ResolveEndpoint(service, region)

		cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)

	cfg.Region = v.Region

	// Zero timeouts mean "wait forever", which is a bad
	// default. Default to long timeouts instead.
	if v.ConnectTimeout == 0 {
		v.ConnectTimeout = s3DefaultConnectTimeout

	if v.ReadTimeout == 0 {
		v.ReadTimeout = s3DefaultReadTimeout

	creds := aws.NewChainProvider(
		[]aws.CredentialsProvider{
			aws.NewStaticCredentialsProvider(v.AccessKey, v.SecretKey, v.AuthToken),
			ec2rolecreds.New(ec2metadata.New(cfg)),

	cfg.Credentials = creds

	v.bucket = &s3AWSbucket{

	// Set up prometheus metrics
	lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
	v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
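
// Illustrative example of the endpoint resolution configured in check()
// above: with DriverParameters.Endpoint set to, say,
// "https://minio.example.com" (a hypothetical non-AWS S3 service),
// requests for the "s3" service are signed for the configured Region and
// sent to that URL, while every other service (including EC2 instance
// metadata, unless ec2metadataHostname overrides it) falls through to the
// SDK's default endpoint resolver.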
// String implements fmt.Stringer.
func (v *S3AWSVolume) String() string {
	return fmt.Sprintf("s3-bucket:%+q", v.Bucket)

// GetDeviceID returns a globally unique ID for the storage bucket.
func (v *S3AWSVolume) GetDeviceID() string {
	return "s3://" + v.Endpoint + "/" + v.Bucket
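
// For example (illustrative values), Endpoint "s3.us-east-1.amazonaws.com"
// and Bucket "example-bucket" yield the device ID
// "s3://s3.us-east-1.amazonaws.com/example-bucket".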
// Compare the given data with the stored data.
func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) error {
	errChan := make(chan error, 1)
		_, err := v.Head("recent/" + loc)
	case err = <-errChan:
		// Checking for "loc" itself here would interfere with
		// future GET requests.
		//
		// On AWS, if X doesn't exist, a HEAD or GET request
		// for X causes X's non-existence to be cached. Thus,
		// if we test for X, then create X and return a
		// signature to our client, the client might still get
		// 404 from all keepstores when trying to read it.
		//
		// To avoid this, we avoid doing HEAD X or GET X until
		// we know X has been written.
		//
		// Note that X might exist even though recent/X
		// doesn't: for example, the response to HEAD recent/X
		// might itself come from a stale cache. In such
		// cases, we will return a false negative and
		// PutHandler might needlessly create another replica
		// on a different volume. That's not ideal, but it's
		// better than passing the eventually-consistent
		// problem on to our clients.
		return v.translateError(err)

	input := &s3.GetObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(loc),

	req := v.bucket.svc.GetObjectRequest(input)
	result, err := req.Send(ctx)
		return v.translateError(err)
	return v.translateError(compareReaderWithBuf(ctx, result.Body, expect, loc[:32]))
// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
// and deletes them from the volume.
func (v *S3AWSVolume) EmptyTrash() {
	if v.cluster.Collections.BlobDeleteConcurrency < 1 {

	var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64

	// Define "ready to delete" as "...when EmptyTrash started".

	emptyOneKey := func(trash *s3.Object) {
		loc := strings.TrimPrefix(*trash.Key, "trash/")
		if !v.isKeepBlock(loc) {
		atomic.AddInt64(&bytesInTrash, *trash.Size)
		atomic.AddInt64(&blocksInTrash, 1)

		trashT := *trash.LastModified
		recent, err := v.Head("recent/" + loc)
		if err != nil && os.IsNotExist(v.translateError(err)) {
			v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+loc, err)
				v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
		} else if err != nil {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+loc)

		if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
			if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
				// recent/loc is too old to protect
				// loc from being Trashed again during
				// the raceWindow that starts if we
				// delete trash/X now.
				//
				// Note this means (TrashSweepInterval
				// < BlobSigningTTL - raceWindow) is
				// necessary to avoid starvation.
				v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
			_, err := v.Head(loc)
			if os.IsNotExist(err) {
				v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
			} else if err != nil {
				v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)

		if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {

		err = v.bucket.Del(*trash.Key)
			v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
		atomic.AddInt64(&bytesDeleted, *trash.Size)
		atomic.AddInt64(&blocksDeleted, 1)

			v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)

		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)

		err = v.bucket.Del("recent/" + loc)
			v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+loc)

	var wg sync.WaitGroup
	todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
	for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
			for key := range todo {

	trashL := s3awsLister{
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,

	for trash := trashL.First(); trash != nil; trash = trashL.Next() {

	if err := trashL.Error(); err != nil {
		v.logger.WithError(err).Error("EmptyTrash: lister failed")

	v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
// fixRace(X) is called when "recent/X" exists but "X" doesn't
// exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
// there was a race between Put and Trash, fixRace recovers from the
// race by Untrashing the block.
func (v *S3AWSVolume) fixRace(loc string) bool {
	trash, err := v.Head("trash/" + loc)
		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+loc)

	recent, err := v.Head("recent/" + loc)
		v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+loc)

	recentTime := *recent.LastModified
	trashTime := *trash.LastModified
	ageWhenTrashed := trashTime.Sub(recentTime)
	if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
		// No evidence of a race: block hasn't been written
		// since it became eligible for Trash. No fix needed.

	v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
	v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "trash/"+loc, loc)
	err = v.safeCopy(loc, "trash/"+loc)
		v.logger.WithError(err).Error("fixRace: copy failed")
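
// Worked example of the race handled above (times and TTL are
// illustrative, assuming BlobSigningTTL = 336h): a client writes or
// touches block X at 10:00, so recent/X is dated 10:00; a trash sweep
// acting on an older index moves X to trash/X at 10:05. Then
// ageWhenTrashed = 5m < 336h, meaning X was still covered by its
// signature TTL when it was trashed, so fixRace copies trash/X back to
// X. If trash/X were instead dated 336h or more after recent/X, the
// trash operation was legitimate and fixRace returns without acting.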
func (v *S3AWSVolume) Head(loc string) (result *s3.HeadObjectOutput, err error) {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(loc),

	req := v.bucket.svc.HeadObjectRequest(input)
	res, err := req.Send(context.TODO())

	v.bucket.stats.TickOps("head")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
	v.bucket.stats.TickErr(err)

		return nil, v.translateError(err)

	result = res.HeadObjectOutput
// Get a block: copy the block data into buf, and return the number of
// bytes copied.
func (v *S3AWSVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
	return getWithPipe(ctx, loc, buf, v)
func (v *S3AWSVolume) readWorker(ctx context.Context, loc string) (rdr io.ReadCloser, err error) {
	buf := make([]byte, 0, 67108864)
	awsBuf := aws.NewWriteAtBuffer(buf)

	downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
		u.PartSize = PartSize
		u.Concurrency = ReadConcurrency

	v.logger.Debugf("PartSize: %d; Concurrency: %d", downloader.PartSize, downloader.Concurrency)

	_, err = downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(loc),

	v.bucket.stats.TickOps("get")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
	v.bucket.stats.TickErr(err)

		return nil, v.translateError(err)

	rdr = NewCountingReader(bytes.NewReader(buf), v.bucket.stats.TickInBytes)
// ReadBlock implements BlockReader.
func (v *S3AWSVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) error {
	rdr, err := v.readWorker(ctx, loc)
		_, err2 := io.Copy(w, rdr)

	err = v.translateError(err)
	if !os.IsNotExist(err) {

	_, err = v.Head("recent/" + loc)
	err = v.translateError(err)
		// If we can't read recent/X, there's no point in
		// trying fixRace. Give up.

	rdr, err = v.readWorker(ctx, loc)
		v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
		err = v.translateError(err)

	_, err = io.Copy(w, rdr)
func (v *S3AWSVolume) writeObject(ctx context.Context, name string, r io.Reader) error {
		// r == nil leads to a memory violation in func readFillBuf in
		// aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
		r = bytes.NewReader(nil)

	uploadInput := s3manager.UploadInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(name),

		var contentMD5 string
		md5, err := hex.DecodeString(name)
		contentMD5 = base64.StdEncoding.EncodeToString(md5)
		uploadInput.ContentMD5 = &contentMD5
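
	// For example, a block named "d41d8cd98f00b204e9800998ecf8427e" (the
	// MD5 of zero bytes of data) decodes to 16 raw bytes whose base64
	// encoding, "1B2M2Y8AsgTpgAmY7PhCfg==", becomes the Content-MD5
	// header value.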
	// Experimentation indicated that using concurrency 5 yields the best
	// throughput, better than higher concurrency (10 or 13) by ~5%.
	// Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
	// is detrimental to throughput (minus ~15%).
	uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
		u.PartSize = PartSize
		u.Concurrency = WriteConcurrency

	// Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
	// the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
	// block, so there is no extra memory use to be concerned about. See
	// makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
	// calculating the SHA-256 because we don't need it; we already use md5sum
	// hashes that match the name of the block.
	_, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
		r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")

	v.bucket.stats.TickOps("put")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
	v.bucket.stats.TickErr(err)
// Put writes a block.
func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
	return putWithPipe(ctx, loc, block, v)

// WriteBlock implements BlockWriter.
func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
	if v.volume.ReadOnly {
		return MethodDisabledError

	r := NewCountingReader(rdr, v.bucket.stats.TickOutBytes)
	err := v.writeObject(ctx, loc, r)

	return v.writeObject(ctx, "recent/"+loc, nil)
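
// Bucket key layout used by this driver (as reflected in WriteBlock above
// and in IndexTo/Mtime below): each block is stored under its locator hash
// "X", with an empty companion object "recent/X" whose Last-Modified time
// records the most recent write or Touch, and trashed blocks are moved to
// "trash/X". For example (illustrative), a freshly written block appears as:
//
//	acbd18db4cc2f85cedef654fccc4a4d8
//	recent/acbd18db4cc2f85cedef654fccc4a4d8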
type s3awsLister struct {
	Logger            logrus.FieldLogger
	Stats             *s3awsbucketStats
	ContinuationToken string

// First fetches the first page and returns the first item. It returns
// nil if the response is the empty set or an error occurs.
func (lister *s3awsLister) First() *s3.Object {

// Next returns the next item, fetching the next page if necessary. It
// returns nil if the last available item has already been fetched, or
// an error has occurred.
func (lister *s3awsLister) Next() *s3.Object {
	if len(lister.buf) == 0 && lister.ContinuationToken != "" {

// Error returns the most recent error encountered by First or Next.
func (lister *s3awsLister) Error() error {
func (lister *s3awsLister) getPage() {
	lister.Stats.TickOps("list")
	lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)

	var input *s3.ListObjectsV2Input
	if lister.ContinuationToken == "" {
		input = &s3.ListObjectsV2Input{
			Bucket:  aws.String(lister.Bucket.bucket),
			MaxKeys: aws.Int64(int64(lister.PageSize)),
			Prefix:  aws.String(lister.Prefix),
		input = &s3.ListObjectsV2Input{
			Bucket:            aws.String(lister.Bucket.bucket),
			MaxKeys:           aws.Int64(int64(lister.PageSize)),
			Prefix:            aws.String(lister.Prefix),
			ContinuationToken: &lister.ContinuationToken,

	req := lister.Bucket.svc.ListObjectsV2Request(input)
	resp, err := req.Send(context.Background())
		if aerr, ok := err.(awserr.Error); ok {

	if *resp.IsTruncated {
		lister.ContinuationToken = *resp.NextContinuationToken
		lister.ContinuationToken = ""

	lister.buf = make([]s3.Object, 0, len(resp.Contents))
	for _, key := range resp.Contents {
		if !strings.HasPrefix(*key.Key, lister.Prefix) {
			lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
		lister.buf = append(lister.buf, key)

func (lister *s3awsLister) pop() (k *s3.Object) {
	if len(lister.buf) > 0 {
		lister.buf = lister.buf[1:]
// IndexTo writes a complete list of locators with the given prefix
// for which Get() can retrieve data.
func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
	// Use a merge sort to find matching sets of X and recent/X.
	dataL := s3awsLister{
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,

	recentL := s3awsLister{
		Prefix:   "recent/" + prefix,
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,

	for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
		if *data.Key >= "g" {
			// Conveniently, "recent/*" and "trash/*" are
			// lexically greater than all hex-encoded data
			// hashes, so stopping here avoids iterating
			// over all of them needlessly with dataL.
		if !v.isKeepBlock(*data.Key) {

		// stamp is the list entry we should use to report the
		// last-modified time for this data block: it will be
		// the recent/X entry if one exists, otherwise the
		// entry for the data block itself.

		// Advance to the corresponding recent/X marker, if any
		for recent != nil && recentL.Error() == nil {
			if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
				recent = recentL.Next()
				recent = recentL.Next()
				// recent/X marker is missing: we'll
				// use the timestamp on the data
				// object.

		if err := recentL.Error(); err != nil {

		fmt.Fprintf(writer, "%s+%d %d\n", *data.Key, *data.Size, stamp.LastModified.UnixNano())
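
		// A sample index line written above (illustrative values):
		//
		//	acbd18db4cc2f85cedef654fccc4a4d8+3 1591234567000000000
		//
		// i.e. locator+size, then the last-modified time in nanoseconds
		// since the Unix epoch.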
// Mtime returns the stored timestamp for the given locator.
func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
	_, err := v.Head(loc)
		return s3AWSZeroTime, v.translateError(err)

	resp, err := v.Head("recent/" + loc)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// The data object X exists, but recent/X is missing.
		err = v.writeObject(context.Background(), "recent/"+loc, nil)
			v.logger.WithError(err).Errorf("error creating %q", "recent/"+loc)
			return s3AWSZeroTime, v.translateError(err)
		v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+loc)
		resp, err = v.Head("recent/" + loc)
			v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+loc)
			return s3AWSZeroTime, v.translateError(err)
	} else if err != nil {
		// HEAD recent/X failed for some other reason.
		return s3AWSZeroTime, err

	return *resp.LastModified, err
// Status returns a *VolumeStatus representing the current in-use
// storage capacity and a fake available capacity that doesn't make
// the volume seem full or nearly-full.
func (v *S3AWSVolume) Status() *VolumeStatus {
	return &VolumeStatus{
		BytesFree: BlockSize * 1000,

// InternalStats returns bucket I/O and API call counters.
func (v *S3AWSVolume) InternalStats() interface{} {
	return &v.bucket.stats
// Touch sets the timestamp for the given locator to the current time.
func (v *S3AWSVolume) Touch(loc string) error {
	if v.volume.ReadOnly {
		return MethodDisabledError

	_, err := v.Head(loc)
	err = v.translateError(err)
	if os.IsNotExist(err) && v.fixRace(loc) {
		// The data object got trashed in a race, but fixRace
		// rescued it.
	} else if err != nil {

	err = v.writeObject(context.Background(), "recent/"+loc, nil)
	return v.translateError(err)
// checkRaceWindow returns a non-nil error if trash/loc is, or might
// be, in the race window (i.e., it's not safe to trash loc).
func (v *S3AWSVolume) checkRaceWindow(loc string) error {
	resp, err := v.Head("trash/" + loc)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// OK, trash/X doesn't exist so we're not in the race
		// window.
	} else if err != nil {
		// Error looking up trash/X. We don't know whether
		// we're in the race window.

	t := resp.LastModified
	safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
		// We can't count on "touch trash/X" to prolong
		// trash/X's lifetime. The new timestamp might not
		// become visible until now+raceWindow, and EmptyTrash
		// is allowed to delete trash/X before then.
		return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)

	// trash/X exists, but it won't be eligible for deletion until
	// after now+raceWindow, so it's safe to overwrite it.
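
	// Worked example of the window computed above (illustrative values,
	// assuming BlobTrashLifetime = 336h and RaceWindow = 24h): if trash/X
	// was last modified 330h ago, safeWindow = (t+336h) - (now+24h) = -18h,
	// so EmptyTrash may already have deleted trash/X before a replacement
	// becomes visible, and Trash must refuse. If trash/X was modified only
	// 300h ago, safeWindow = +12h and overwriting it is safe.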
func (b *s3AWSbucket) Del(path string) error {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(path),

	req := b.svc.DeleteObjectRequest(input)
	_, err := req.Send(context.Background())
	b.stats.TickOps("delete")
	b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
// Trash a Keep block.
func (v *S3AWSVolume) Trash(loc string) error {
	if v.volume.ReadOnly {
		return MethodDisabledError

	if t, err := v.Mtime(loc); err != nil {
	} else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {

	if v.cluster.Collections.BlobTrashLifetime == 0 {
			return ErrS3TrashDisabled
		return v.translateError(v.bucket.Del(loc))

	err := v.checkRaceWindow(loc)

	err = v.safeCopy("trash/"+loc, loc)

	return v.translateError(v.bucket.Del(loc))
// Untrash moves block from trash back into store
func (v *S3AWSVolume) Untrash(loc string) error {
	err := v.safeCopy(loc, "trash/"+loc)

	err = v.writeObject(context.Background(), "recent/"+loc, nil)
	return v.translateError(err)
type s3awsbucketStats struct {

func (s *s3awsbucketStats) TickErr(err error) {
	errType := fmt.Sprintf("%T", err)
	if aerr, ok := err.(awserr.Error); ok {
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// A service error occurred
			errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
			errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())

	s.statsTicker.TickErr(err, errType)