// Copyright (C) The Arvados Authors. All rights reserved.
// SPDX-License-Identifier: AGPL-3.0
23 "git.arvados.org/arvados.git/sdk/go/arvados"
24 "github.com/aws/aws-sdk-go-v2/aws"
25 "github.com/aws/aws-sdk-go-v2/aws/awserr"
26 "github.com/aws/aws-sdk-go-v2/aws/defaults"
27 "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
28 "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
29 "github.com/aws/aws-sdk-go-v2/aws/endpoints"
30 "github.com/aws/aws-sdk-go-v2/service/s3"
31 "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
32 "github.com/prometheus/client_golang/prometheus"
33 "github.com/sirupsen/logrus"
37 driver["S3"] = newS3AWSVolume
	s3DefaultReadTimeout        = arvados.Duration(10 * time.Minute)
	s3DefaultConnectTimeout     = arvados.Duration(time.Minute)
	maxClockSkew                = 600 * time.Second
	nearlyRFC1123               = "Mon, 2 Jan 2006 15:04:05 GMT"
	s3downloaderPartSize        = 5 * 1024 * 1024
	s3downloaderReadConcurrency = 13
	s3uploaderPartSize          = 5 * 1024 * 1024
	s3uploaderWriteConcurrency  = 5
	ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
// S3AWSVolume implements Volume using an S3 bucket.
type S3AWSVolume struct {
	arvados.S3VolumeDriverParameters
	AuthToken      string    // populated automatically when IAMRole is used
	AuthExpiration time.Time // populated automatically when IAMRole is used
	cluster    *arvados.Cluster
	logger     logrus.FieldLogger
	metrics    *volumeMetricsVecs
	bufferPool *bufferPool
// s3AWSbucket wraps the S3 bucket client and counts I/O and API usage
// stats. The wrapped bucket can be replaced atomically with SetBucket
// in order to update credentials.
type s3AWSbucket struct {
	stats s3awsbucketStats
var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
var s3AWSZeroTime time.Time
func (v *S3AWSVolume) isKeepBlock(s string) (string, bool) {
	if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
		s = s[v.PrefixLength+1:]
	}
	return s, s3AWSKeepBlockRegexp.MatchString(s)
// Return the key used for a given loc. If PrefixLength==0 then
// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
// "abc/abcdef0123", etc.
func (v *S3AWSVolume) key(loc string) string {
	if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
		return loc[:v.PrefixLength] + "/" + loc
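// newS3AWSVolume builds an S3AWSVolume from the given volume
// parameters, unmarshals the volume's DriverParameters into it, and
// validates the resulting configuration via check().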
func newS3AWSVolume(params newVolumeParams) (volume, error) {
		cluster:    params.Cluster,
		volume:     params.ConfigVolume,
		metrics:    params.MetricsVecs,
		bufferPool: params.BufferPool,
	err := json.Unmarshal(params.ConfigVolume.DriverParameters, v)
	v.logger = params.Logger.WithField("Volume", v.DeviceID())
	return v, v.check("")
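// translateError converts AWS SDK errors into their standard-library
// equivalents: request cancellation becomes context.Canceled, and
// "NotFound"/"NoSuchKey" API errors become os.ErrNotExist.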
func (v *S3AWSVolume) translateError(err error) error {
	if _, ok := err.(*aws.RequestCanceledError); ok {
		return context.Canceled
	} else if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() == "NotFound" {
			return os.ErrNotExist
		} else if aerr.Code() == "NoSuchKey" {
			return os.ErrNotExist
// safeCopy calls CopyObjectRequest, and checks the response to make
// sure the copy succeeded and updated the timestamp on the
// destination object
// (If something goes wrong during the copy, the error will be
// embedded in the 200 OK response)
func (v *S3AWSVolume) safeCopy(dst, src string) error {
	input := &s3.CopyObjectInput{
		Bucket:      aws.String(v.bucket.bucket),
		ContentType: aws.String("application/octet-stream"),
		CopySource:  aws.String(v.bucket.bucket + "/" + src),
		Key:         aws.String(dst),
	req := v.bucket.svc.CopyObjectRequest(input)
	resp, err := req.Send(context.Background())
	err = v.translateError(err)
	if os.IsNotExist(err) {
	} else if err != nil {
		return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err)
	if resp.CopyObjectResult.LastModified == nil {
		return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q", resp.CopyObjectResult.LastModified)
	} else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew {
		return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q", resp.CopyObjectResult.LastModified)
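// check validates the driver parameters, fills in defaults
// (IndexPageSize, Region, timeouts), configures the AWS SDK endpoint
// resolver and credentials chain, and sets up the bucket client and
// its Prometheus metrics. A non-empty ec2metadataHostname overrides
// the endpoint used by the EC2 instance-metadata credential provider.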
func (v *S3AWSVolume) check(ec2metadataHostname string) error {
		return errors.New("DriverParameters: Bucket must be provided")
	if v.IndexPageSize == 0 {
		v.IndexPageSize = 1000
	if v.RaceWindow < 0 {
		return errors.New("DriverParameters: RaceWindow must not be negative")
		return errors.New("DriverParameters: V2Signature is not supported")
	defaultResolver := endpoints.NewDefaultResolver()
	cfg := defaults.Config()
	if v.Endpoint == "" && v.Region == "" {
		return fmt.Errorf("AWS region or endpoint must be specified")
	} else if v.Endpoint != "" || ec2metadataHostname != "" {
		myCustomResolver := func(service, region string) (aws.Endpoint, error) {
			if v.Endpoint != "" && service == "s3" {
					SigningRegion: region,
			} else if service == "ec2metadata" && ec2metadataHostname != "" {
					URL: ec2metadataHostname,
				return defaultResolver.ResolveEndpoint(service, region)
		cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
		// Endpoint is already specified (otherwise we would
		// have errored out above), but Region is also
		// required by the aws sdk, in order to determine
		// SignatureVersions.
		v.Region = "us-east-1"
	cfg.Region = v.Region
	// Zero timeouts mean "wait forever", which is a bad
	// default. Default to long timeouts instead.
	if v.ConnectTimeout == 0 {
		v.ConnectTimeout = s3DefaultConnectTimeout
	if v.ReadTimeout == 0 {
		v.ReadTimeout = s3DefaultReadTimeout
	creds := aws.NewChainProvider(
		[]aws.CredentialsProvider{
			aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken),
			ec2rolecreds.New(ec2metadata.New(cfg)),
	cfg.Credentials = creds
	v.bucket = &s3AWSbucket{
	// Set up prometheus metrics
	lbls := prometheus.Labels{"device_id": v.DeviceID()}
	v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
// DeviceID returns a globally unique ID for the storage bucket.
func (v *S3AWSVolume) DeviceID() string {
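	// For example (illustrative values only): with Endpoint
	// "s3.example.com" and Bucket "keep-blocks", the DeviceID is
	// "s3://s3.example.com/keep-blocks".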
242 return "s3://" + v.Endpoint + "/" + v.Bucket
// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
// and deletes them from the volume.
func (v *S3AWSVolume) EmptyTrash() {
	var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
	// Define "ready to delete" as "...when EmptyTrash started".
	emptyOneKey := func(trash *s3.Object) {
		key := strings.TrimPrefix(*trash.Key, "trash/")
		loc, isblk := v.isKeepBlock(key)
		atomic.AddInt64(&bytesInTrash, *trash.Size)
		atomic.AddInt64(&blocksInTrash, 1)
		trashT := *trash.LastModified
		recent, err := v.head("recent/" + key)
		if err != nil && os.IsNotExist(v.translateError(err)) {
			v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
			err = v.BlockUntrash(loc)
				v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
		} else if err != nil {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
		if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
			if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
				// recent/key is too old to protect
				// loc from being Trashed again during
				// the raceWindow that starts if we
				// delete trash/X now.
				// Note this means (TrashSweepInterval
				// < BlobSigningTTL - raceWindow) is
				// necessary to avoid starvation.
				v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
			_, err := v.head(key)
			if os.IsNotExist(err) {
				v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
			} else if err != nil {
				v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
		if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
		err = v.bucket.Del(*trash.Key)
			v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
		atomic.AddInt64(&bytesDeleted, *trash.Size)
		atomic.AddInt64(&blocksDeleted, 1)
		_, err = v.head(*trash.Key)
			v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", key)
		err = v.bucket.Del("recent/" + key)
			v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+key)
	var wg sync.WaitGroup
	todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
	for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
			for key := range todo {
	trashL := s3awsLister{
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	for trash := trashL.First(); trash != nil; trash = trashL.Next() {
	if err := trashL.Error(); err != nil {
		v.logger.WithError(err).Error("EmptyTrash: lister failed")
	v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
// fixRace(X) is called when "recent/X" exists but "X" doesn't
// exist. If the timestamps on "recent/X" and "trash/X" indicate there
// was a race between Put and Trash, fixRace recovers from the race by
// Untrashing the block.
func (v *S3AWSVolume) fixRace(key string) bool {
	trash, err := v.head("trash/" + key)
		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
	recent, err := v.head("recent/" + key)
		v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
	recentTime := *recent.LastModified
	trashTime := *trash.LastModified
	ageWhenTrashed := trashTime.Sub(recentTime)
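	// Illustrative example (hypothetical values): if recent/X was
	// written at 10:00 and trash/X at 10:05, ageWhenTrashed is 5
	// minutes; with a BlobSigningTTL of 2 weeks this is evidence
	// that a writer touched X while it was being trashed, so the
	// block is untrashed below.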
	if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
		// No evidence of a race: block hasn't been written
		// since it became eligible for Trash. No fix needed.
	v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
	v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
	err = v.safeCopy(key, "trash/"+key)
		v.logger.WithError(err).Error("fixRace: copy failed")
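// head performs a HEAD request for the given object key, records the
// operation in the bucket's stats, and returns the response (or a
// translated error).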
func (v *S3AWSVolume) head(key string) (result *s3.HeadObjectOutput, err error) {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
	req := v.bucket.svc.HeadObjectRequest(input)
	res, err := req.Send(context.TODO())
	v.bucket.stats.TickOps("head")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
	v.bucket.stats.TickErr(err)
		return nil, v.translateError(err)
	result = res.HeadObjectOutput
// BlockRead reads a Keep block that has been stored as a block blob
func (v *S3AWSVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
	buf, err := v.bufferPool.GetContext(ctx)
	defer v.bufferPool.Put(buf)
	streamer := newStreamWriterAt(writeTo, 65536, buf)
	defer streamer.Close()
	err = v.readWorker(ctx, key, streamer)
		err = v.translateError(err)
		if !os.IsNotExist(err) {
		if streamer.WroteAt() > 0 {
			return 0, errors.New("bug? readWorker returned ErrNotExist after writing to streamer")
		_, err = v.head("recent/" + key)
		err = v.translateError(err)
			// If we can't read recent/X, there's no point in
			// trying fixRace. Give up.
		err = v.readWorker(ctx, key, streamer)
			v.logger.Warnf("reading %s after successful fixRace: %s", hash, err)
			err = v.translateError(err)
	err = streamer.Close()
		return 0, v.translateError(err)
	return streamer.Wrote(), nil
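// readWorker downloads the given object into dst using the s3manager
// concurrent downloader, and records the transfer in the bucket's
// stats.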
func (v *S3AWSVolume) readWorker(ctx context.Context, key string, dst io.WriterAt) error {
	downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
		u.PartSize = s3downloaderPartSize
		u.Concurrency = s3downloaderReadConcurrency
	count, err := downloader.DownloadWithContext(ctx, dst, &s3.GetObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
	v.bucket.stats.TickOps("get")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
	v.bucket.stats.TickErr(err)
	v.bucket.stats.TickInBytes(uint64(count))
	return v.translateError(err)
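// writeObject uploads r to the given object key using the s3manager
// concurrent uploader, setting Content-MD5 for Keep block keys and
// recording the operation in the bucket's stats.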
func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader) error {
		// r == nil leads to a memory violation in func readFillBuf in
		// aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
		r = bytes.NewReader(nil)
	uploadInput := s3manager.UploadInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
	if loc, ok := v.isKeepBlock(key); ok {
		var contentMD5 string
		md5, err := hex.DecodeString(loc)
			return v.translateError(err)
		contentMD5 = base64.StdEncoding.EncodeToString(md5)
		uploadInput.ContentMD5 = &contentMD5
	// Experimentation indicated that using concurrency 5 yields the best
	// throughput, better than higher concurrency (10 or 13) by ~5%.
	// Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
	// is detrimental to throughput (minus ~15%).
	uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
		u.PartSize = s3uploaderPartSize
		u.Concurrency = s3uploaderWriteConcurrency
	// Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
	// the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
	// block, so there is no extra memory use to be concerned about. See
	// makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
	// calculating the Sha-256 because we don't need it; we already use md5sum
	// hashes that match the name of the block.
	_, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
		r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
	v.bucket.stats.TickOps("put")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
	v.bucket.stats.TickErr(err)
	return v.translateError(err)
// BlockWrite writes a block.
func (v *S3AWSVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
	// Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
	// sdk to avoid memory allocation there. See #17339 for more information.
	rdr := bytes.NewReader(data)
	r := newCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
	err := v.writeObject(ctx, key, r)
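	// In addition to the data object, an empty "recent/"+key marker
	// object is written; its Last-Modified time is what Mtime, Index,
	// and the trash/untrash logic use as the block's timestamp.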
	return v.writeObject(ctx, "recent/"+key, nil)
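// s3awsLister pages through an S3 bucket listing (ListObjectsV2) for
// a given prefix, buffering one page of results at a time.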
type s3awsLister struct {
	Logger            logrus.FieldLogger
	Stats             *s3awsbucketStats
	ContinuationToken string

// First fetches the first page and returns the first item. It returns
// nil if the response is the empty set or an error occurs.
func (lister *s3awsLister) First() *s3.Object {
// Next returns the next item, fetching the next page if necessary. It
// returns nil if the last available item has already been fetched, or
// an error occurs.
func (lister *s3awsLister) Next() *s3.Object {
	if len(lister.buf) == 0 && lister.ContinuationToken != "" {

// Return the most recent error encountered by First or Next.
func (lister *s3awsLister) Error() error {

func (lister *s3awsLister) getPage() {
	lister.Stats.TickOps("list")
	lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
	var input *s3.ListObjectsV2Input
	if lister.ContinuationToken == "" {
		input = &s3.ListObjectsV2Input{
			Bucket:  aws.String(lister.Bucket.bucket),
			MaxKeys: aws.Int64(int64(lister.PageSize)),
			Prefix:  aws.String(lister.Prefix),
		input = &s3.ListObjectsV2Input{
			Bucket:            aws.String(lister.Bucket.bucket),
			MaxKeys:           aws.Int64(int64(lister.PageSize)),
			Prefix:            aws.String(lister.Prefix),
			ContinuationToken: &lister.ContinuationToken,
	req := lister.Bucket.svc.ListObjectsV2Request(input)
	resp, err := req.Send(context.Background())
		if aerr, ok := err.(awserr.Error); ok {
	if *resp.IsTruncated {
		lister.ContinuationToken = *resp.NextContinuationToken
		lister.ContinuationToken = ""
	lister.buf = make([]s3.Object, 0, len(resp.Contents))
	for _, key := range resp.Contents {
		if !strings.HasPrefix(*key.Key, lister.Prefix) {
			lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
		lister.buf = append(lister.buf, key)
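// pop returns the next buffered item, removing it from the buffer;
// it returns nil when the buffer is empty.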
func (lister *s3awsLister) pop() (k *s3.Object) {
	if len(lister.buf) > 0 {
		lister.buf = lister.buf[1:]
// Index writes a complete list of locators with the given prefix
// for which Get() can retrieve data.
func (v *S3AWSVolume) Index(ctx context.Context, prefix string, writer io.Writer) error {
	prefix = v.key(prefix)
	// Use a merge sort to find matching sets of X and recent/X.
	dataL := s3awsLister{
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	recentL := s3awsLister{
		Prefix:   "recent/" + prefix,
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
		if ctx.Err() != nil {
		if *data.Key >= "g" {
			// Conveniently, "recent/*" and "trash/*" are
			// lexically greater than all hex-encoded data
			// hashes, so stopping here avoids iterating
			// over all of them needlessly with dataL.
		loc, isblk := v.isKeepBlock(*data.Key)
		// stamp is the list entry we should use to report the
		// last-modified time for this data block: it will be
		// the recent/X entry if one exists, otherwise the
		// entry for the data block itself.
		// Advance to the corresponding recent/X marker, if any
		for recent != nil && recentL.Error() == nil {
			if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
				recent = recentL.Next()
				recent = recentL.Next()
				// recent/X marker is missing: we'll
				// use the timestamp on the data
		if err := recentL.Error(); err != nil {
		// We truncate sub-second precision here. Otherwise
		// timestamps will never match the RFC1123-formatted
		// Last-Modified values parsed by Mtime().
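		// Each index line has the form "<hash>+<size> <timestamp_ns>",
		// e.g. (hypothetical values):
		//   37b51d194a7513e45b56f6524f2d51f2+3 1712345678000000000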
		fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000)
// Mtime returns the stored timestamp for the given locator.
func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
	_, err := v.head(key)
		return s3AWSZeroTime, v.translateError(err)
	resp, err := v.head("recent/" + key)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// The data object X exists, but recent/X is missing.
		err = v.writeObject(context.Background(), "recent/"+key, nil)
			v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
			return s3AWSZeroTime, v.translateError(err)
		v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key)
		resp, err = v.head("recent/" + key)
			v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
			return s3AWSZeroTime, v.translateError(err)
	} else if err != nil {
		// HEAD recent/X failed for some other reason.
		return s3AWSZeroTime, err
	return *resp.LastModified, err
// InternalStats returns bucket I/O and API call counters.
func (v *S3AWSVolume) InternalStats() interface{} {
	return &v.bucket.stats
// BlockTouch sets the timestamp for the given locator to the current time.
func (v *S3AWSVolume) BlockTouch(hash string) error {
	_, err := v.head(key)
	err = v.translateError(err)
	if os.IsNotExist(err) && v.fixRace(key) {
		// The data object got trashed in a race, but fixRace
		// rescued it.
	} else if err != nil {
	err = v.writeObject(context.Background(), "recent/"+key, nil)
	return v.translateError(err)
// checkRaceWindow returns a non-nil error if trash/key is, or might
// be, in the race window (i.e., it's not safe to trash key).
func (v *S3AWSVolume) checkRaceWindow(key string) error {
	resp, err := v.head("trash/" + key)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// OK, trash/X doesn't exist so we're not in the race
		// window.
	} else if err != nil {
		// Error looking up trash/X. We don't know whether
		// we're in the race window.
	t := resp.LastModified
	safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
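	// safeWindow is how much time remains between now+RaceWindow and
	// the moment trash/X becomes eligible for deletion. Illustrative
	// example (hypothetical values): with BlobTrashLifetime = 336h
	// and RaceWindow = 24h, a trash/X last modified 300h ago yields
	// safeWindow = 336h - 300h - 24h = 12h, so trashing X again is
	// considered safe.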
		// We can't count on "touch trash/X" to prolong
		// trash/X's lifetime. The new timestamp might not
		// become visible until now+raceWindow, and EmptyTrash
		// is allowed to delete trash/X before then.
		return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
	// trash/X exists, but it won't be eligible for deletion until
	// after now+raceWindow, so it's safe to overwrite it.
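// Del deletes the given object from the bucket and records the
// operation in the bucket's stats.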
func (b *s3AWSbucket) Del(path string) error {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(path),
	req := b.svc.DeleteObjectRequest(input)
	_, err := req.Send(context.Background())
	b.stats.TickOps("delete")
	b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
// BlockTrash moves a Keep block to the trash.
func (v *S3AWSVolume) BlockTrash(loc string) error {
	if t, err := v.Mtime(loc); err != nil {
	} else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
	if v.cluster.Collections.BlobTrashLifetime == 0 {
			return ErrS3TrashDisabled
		return v.translateError(v.bucket.Del(key))
	err := v.checkRaceWindow(key)
	err = v.safeCopy("trash/"+key, key)
	return v.translateError(v.bucket.Del(key))
// BlockUntrash moves a block from the trash back into the store.
func (v *S3AWSVolume) BlockUntrash(hash string) error {
	err := v.safeCopy(key, "trash/"+key)
	err = v.writeObject(context.Background(), "recent/"+key, nil)
	return v.translateError(err)
type s3awsbucketStats struct {

func (s *s3awsbucketStats) TickErr(err error) {
	errType := fmt.Sprintf("%T", err)
	if aerr, ok := err.(awserr.Error); ok {
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// A service error occurred
			errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
			errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
	s.statsTicker.TickErr(err, errType)