// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
23 "git.arvados.org/arvados.git/sdk/go/arvados"
24 "github.com/aws/aws-sdk-go-v2/aws"
25 "github.com/aws/aws-sdk-go-v2/aws/awserr"
26 "github.com/aws/aws-sdk-go-v2/aws/defaults"
27 "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
28 "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
29 "github.com/aws/aws-sdk-go-v2/aws/endpoints"
30 "github.com/aws/aws-sdk-go-v2/service/s3"
31 "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
32 "github.com/prometheus/client_golang/prometheus"
33 "github.com/sirupsen/logrus"
// S3AWSVolume implements Volume using an S3 bucket.
type S3AWSVolume struct {
	arvados.S3VolumeDriverParameters
	AuthToken      string    // populated automatically when IAMRole is used
	AuthExpiration time.Time // populated automatically when IAMRole is used

	cluster *arvados.Cluster
	volume  arvados.Volume
	logger  logrus.FieldLogger
	metrics *volumeMetricsVecs
	bucket  *s3AWSbucket
}
// s3AWSbucket wraps the S3 bucket and client, and counts I/O and API
// usage stats. The wrapped client can be replaced atomically with
// SetBucket in order to update credentials.
type s3AWSbucket struct {
	bucket string
	svc    *s3.Client
	stats  s3awsbucketStats
}
// chooseS3VolumeDriver distinguishes between the old goamz driver and
// the aws-sdk-go-v2 driver, based on the UseAWSS3v2Driver feature flag.
func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
	v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
	err := json.Unmarshal(volume.DriverParameters, v)
	if err != nil {
		return nil, err
	}
	if v.UseAWSS3v2Driver {
		logger.Debugln("Using AWS S3 v2 driver")
		return newS3AWSVolume(cluster, volume, logger, metrics)
	}
	logger.Debugln("Using goamz S3 driver")
	return newS3Volume(cluster, volume, logger, metrics)
}
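// For illustration, a volume's DriverParameters JSON that selects this
// v2 driver might look like the following (bucket name, region, and
// credentials are hypothetical; see arvados.S3VolumeDriverParameters
// for the full field list):
//
//	{
//	  "AccessKey": "...",
//	  "SecretKey": "...",
//	  "Region": "us-east-1",
//	  "Bucket": "example-keep-bucket",
//	  "UseAWSS3v2Driver": true
//	}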
const (
	// PartSize is the part size used for S3 multipart uploads and
	// downloads; ReadConcurrency and WriteConcurrency are the
	// number of parts transferred in parallel.
	PartSize         = 5 * 1024 * 1024
	ReadConcurrency  = 13
	WriteConcurrency = 5
)

var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
var s3AWSZeroTime time.Time
func (v *S3AWSVolume) isKeepBlock(s string) bool {
	return s3AWSKeepBlockRegexp.MatchString(s)
}
func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
	v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
	err := json.Unmarshal(volume.DriverParameters, v)
	if err != nil {
		return nil, err
	}
	v.logger = logger.WithField("Volume", v.String())
	return v, v.check("")
}
func (v *S3AWSVolume) translateError(err error) error {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "NotFound":
			return os.ErrNotExist
		case "NoSuchKey":
			return os.ErrNotExist
		}
	}
	return err
}
// safeCopy calls CopyObjectRequest, and checks the response to make
// sure the copy succeeded and updated the timestamp on the destination
// object.
//
// (If something goes wrong during the copy, the error will be embedded
// in the 200 OK response.)
func (v *S3AWSVolume) safeCopy(dst, src string) error {
	input := &s3.CopyObjectInput{
		Bucket:      aws.String(v.bucket.bucket),
		ContentType: aws.String("application/octet-stream"),
		CopySource:  aws.String(v.bucket.bucket + "/" + src),
		Key:         aws.String(dst),
	}

	req := v.bucket.svc.CopyObjectRequest(input)
	resp, err := req.Send(context.Background())

	err = v.translateError(err)
	if os.IsNotExist(err) {
		return err
	} else if err != nil {
		return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err)
	}

	if resp.CopyObjectResult.LastModified == nil {
		return fmt.Errorf("PutCopy(%q ← %q) succeeded but did not return a timestamp", dst, v.bucket.bucket+"/"+src)
	} else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew {
		return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %s", resp.CopyObjectResult.LastModified)
	}
	return nil
}
func (v *S3AWSVolume) check(ec2metadataHostname string) error {
	if v.Bucket == "" {
		return errors.New("DriverParameters: Bucket must be provided")
	}
	if v.IndexPageSize == 0 {
		v.IndexPageSize = 1000
	}
	if v.RaceWindow < 0 {
		return errors.New("DriverParameters: RaceWindow must not be negative")
	}
	if v.V2Signature {
		return errors.New("DriverParameters: V2Signature is not supported")
	}

	defaultResolver := endpoints.NewDefaultResolver()

	cfg := defaults.Config()

	if v.Endpoint == "" && v.Region == "" {
		return fmt.Errorf("AWS region or endpoint must be specified")
	} else if v.Endpoint != "" || ec2metadataHostname != "" {
		myCustomResolver := func(service, region string) (aws.Endpoint, error) {
			if v.Endpoint != "" && service == "s3" {
				return aws.Endpoint{
					URL:           v.Endpoint,
					SigningRegion: v.Region,
				}, nil
			} else if service == "ec2metadata" && ec2metadataHostname != "" {
				return aws.Endpoint{
					URL: ec2metadataHostname,
				}, nil
			}
			return defaultResolver.ResolveEndpoint(service, region)
		}
		cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
	}

	cfg.Region = v.Region

	// Zero timeouts mean "wait forever", which is a bad
	// default. Default to long timeouts instead.
	if v.ConnectTimeout == 0 {
		v.ConnectTimeout = s3DefaultConnectTimeout
	}
	if v.ReadTimeout == 0 {
		v.ReadTimeout = s3DefaultReadTimeout
	}

	creds := aws.NewChainProvider(
		[]aws.CredentialsProvider{
			aws.NewStaticCredentialsProvider(v.AccessKey, v.SecretKey, v.AuthToken),
			ec2rolecreds.New(ec2metadata.New(cfg)),
		})
	cfg.Credentials = creds

	v.bucket = &s3AWSbucket{
		bucket: v.Bucket,
		svc:    s3.New(cfg),
	}

	// Set up prometheus metrics
	lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
	v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)

	return nil
}
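// For example (hypothetical values): with Endpoint
// "https://minio.example.com:9000" and Region "us-east-1", the custom
// resolver above directs all "s3" service requests to the MinIO URL,
// signed for us-east-1, while other services (including
// "ec2metadata", unless ec2metadataHostname overrides it) fall
// through to the SDK's default endpoints.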
// String implements fmt.Stringer.
func (v *S3AWSVolume) String() string {
	return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
}
// GetDeviceID returns a globally unique ID for the storage bucket.
func (v *S3AWSVolume) GetDeviceID() string {
	return "s3://" + v.Endpoint + "/" + v.Bucket
}
// Compare the given data with the stored data.
func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) error {
	errChan := make(chan error, 1)
	go func() {
		_, err := v.Head("recent/" + loc)
		errChan <- err
	}()
	var err error
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err = <-errChan:
	}
	if err != nil {
		// Checking for "loc" itself here would interfere with
		// future GET requests.
		//
		// On AWS, if X doesn't exist, a HEAD or GET request
		// for X causes X's non-existence to be cached. Thus,
		// if we test for X, then create X and return a
		// signature to our client, the client might still get
		// 404 from all keepstores when trying to read it.
		//
		// To avoid this, we avoid doing HEAD X or GET X until
		// we know X has been written.
		//
		// Note that X might exist even though recent/X
		// doesn't: for example, the response to HEAD recent/X
		// might itself come from a stale cache. In such
		// cases, we will return a false negative and
		// PutHandler might needlessly create another replica
		// on a different volume. That's not ideal, but it's
		// better than passing the eventually-consistent
		// problem on to our clients.
		return v.translateError(err)
	}

	input := &s3.GetObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(loc),
	}

	req := v.bucket.svc.GetObjectRequest(input)
	result, err := req.Send(ctx)
	if err != nil {
		return v.translateError(err)
	}
	return v.translateError(compareReaderWithBuf(ctx, result.Body, expect, loc[:32]))
}
// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
// and deletes them from the volume.
func (v *S3AWSVolume) EmptyTrash() {
	if v.cluster.Collections.BlobDeleteConcurrency < 1 {
		return
	}

	var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64

	// Define "ready to delete" as "...when EmptyTrash started".
	startT := time.Now()

	emptyOneKey := func(trash *s3.Object) {
		loc := strings.TrimPrefix(*trash.Key, "trash/")
		if !v.isKeepBlock(loc) {
			return
		}
		atomic.AddInt64(&bytesInTrash, *trash.Size)
		atomic.AddInt64(&blocksInTrash, 1)

		trashT := *(trash.LastModified)
		recent, err := v.Head("recent/" + loc)
		if err != nil && os.IsNotExist(v.translateError(err)) {
			v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+loc, err)
			err = v.Untrash(loc)
			if err != nil {
				v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
			}
			return
		} else if err != nil {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+loc)
			return
		}

		if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
			if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
				// recent/loc is too old to protect
				// loc from being Trashed again during
				// the raceWindow that starts if we
				// delete trash/X now.
				//
				// Note this means (TrashSweepInterval
				// < BlobSigningTTL - raceWindow) is
				// necessary to avoid starvation.
				v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
				v.fixRace(loc)
				v.Touch(loc)
				return
			}
			_, err := v.Head(loc)
			if os.IsNotExist(err) {
				v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
				v.fixRace(loc)
				return
			} else if err != nil {
				v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
				return
			}
		}
		if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
			return
		}
		err = v.bucket.Del(*trash.Key)
		if err != nil {
			v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
			return
		}
		atomic.AddInt64(&bytesDeleted, *trash.Size)
		atomic.AddInt64(&blocksDeleted, 1)

		_, err = v.Head(loc)
		if err == nil {
			v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
			return
		}
		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
			return
		}
		err = v.bucket.Del("recent/" + loc)
		if err != nil {
			v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+loc)
		}
	}

	var wg sync.WaitGroup
	todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
	for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range todo {
				emptyOneKey(key)
			}
		}()
	}

	trashL := s3awsLister{
		Logger:   v.logger,
		Bucket:   v.bucket,
		Prefix:   "trash/",
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	}
	for trash := trashL.First(); trash != nil; trash = trashL.Next() {
		todo <- trash
	}
	close(todo)
	wg.Wait()

	if err := trashL.Error(); err != nil {
		v.logger.WithError(err).Error("EmptyTrash: lister failed")
	}
	v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
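// A worked example of the race math above (all values hypothetical):
// with BlobSigningTTL = 336h (two weeks) and RaceWindow = 24h, a
// recent/X marker older than 336h-24h = 312h no longer protects X, so
// EmptyTrash calls fixRace + Touch instead of deleting trash/X. A
// TrashSweepInterval below 312h therefore satisfies the
// "TrashSweepInterval < BlobSigningTTL - raceWindow" condition noted
// above.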
// fixRace(X) is called when "recent/X" exists but "X" doesn't
// exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
// there was a race between Put and Trash, fixRace recovers from the
// race by Untrashing the block.
func (v *S3AWSVolume) fixRace(loc string) bool {
	trash, err := v.Head("trash/" + loc)
	if err != nil {
		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+loc)
		}
		return false
	}

	recent, err := v.Head("recent/" + loc)
	if err != nil {
		v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+loc)
		return false
	}

	recentTime := *recent.LastModified
	trashTime := *trash.LastModified
	ageWhenTrashed := trashTime.Sub(recentTime)
	if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
		// No evidence of a race: block hasn't been written
		// since it became eligible for Trash. No fix needed.
		return false
	}

	v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
	v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "trash/"+loc, loc)
	err = v.safeCopy(loc, "trash/"+loc)
	if err != nil {
		v.logger.WithError(err).Error("fixRace: copy failed")
		return false
	}
	return true
}
// Head performs a HEAD request on the given object and returns its
// metadata.
func (v *S3AWSVolume) Head(loc string) (result *s3.HeadObjectOutput, err error) {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(loc),
	}

	req := v.bucket.svc.HeadObjectRequest(input)
	res, err := req.Send(context.TODO())

	v.bucket.stats.TickOps("head")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
	v.bucket.stats.TickErr(err)

	if err != nil {
		return nil, v.translateError(err)
	}
	result = res.HeadObjectOutput
	return
}
// Get a block: copy the block data into buf, and return the number of
// bytes copied.
func (v *S3AWSVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
	return getWithPipe(ctx, loc, buf, v)
}
func (v *S3AWSVolume) readWorker(ctx context.Context, loc string) (rdr io.ReadCloser, err error) {
	buf := make([]byte, 0, 67108864) // 64 MiB, the maximum Keep block size
	awsBuf := aws.NewWriteAtBuffer(buf)

	downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
		u.PartSize = PartSize
		u.Concurrency = ReadConcurrency
	})

	v.logger.Debugf("Partsize: %d; Concurrency: %d\n", downloader.PartSize, downloader.Concurrency)

	_, err = downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(loc),
	})
	v.bucket.stats.TickOps("get")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
	v.bucket.stats.TickErr(err)
	if err != nil {
		return nil, v.translateError(err)
	}
	buf = awsBuf.Bytes()

	rdr = NewCountingReader(bytes.NewReader(buf), v.bucket.stats.TickInBytes)
	return
}
// ReadBlock implements BlockReader.
func (v *S3AWSVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) error {
	rdr, err := v.readWorker(ctx, loc)
	if err == nil {
		_, err2 := io.Copy(w, rdr)
		if err2 != nil {
			return err2
		}
		return err
	}

	err = v.translateError(err)
	if !os.IsNotExist(err) {
		return err
	}

	_, err = v.Head("recent/" + loc)
	err = v.translateError(err)
	if err != nil {
		// If we can't read recent/X, there's no point in
		// trying fixRace. Give up.
		return err
	}
	if !v.fixRace(loc) {
		return os.ErrNotExist
	}

	rdr, err = v.readWorker(ctx, loc)
	if err != nil {
		v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
		return v.translateError(err)
	}

	_, err = io.Copy(w, rdr)
	return err
}
func (v *S3AWSVolume) writeObject(ctx context.Context, name string, r io.Reader) error {
	if r == nil {
		// r == nil leads to a memory violation in func readFillBuf in
		// aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
		r = bytes.NewReader(nil)
	}

	uploadInput := s3manager.UploadInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(name),
		Body:   r,
	}

	if v.isKeepBlock(name) {
		// The block name is its MD5 hash: send it as the
		// Content-MD5 header so S3 verifies the upload.
		var contentMD5 string
		md5, err := hex.DecodeString(name)
		if err != nil {
			return err
		}
		contentMD5 = base64.StdEncoding.EncodeToString(md5)
		uploadInput.ContentMD5 = &contentMD5
	}
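	// For example: the empty block's name (the MD5 of the empty
	// string) "d41d8cd98f00b204e9800998ecf8427e" decodes to 16 raw
	// bytes that base64-encode to "1B2M2Y8AsgTpgAmY7PhCfg==",
	// which is the value S3 expects in the Content-MD5 header.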
	// Experimentation indicated that using concurrency 5 yields the best
	// throughput, better than higher concurrency (10 or 13) by ~5%.
	// Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
	// is detrimental to throughput (minus ~15%).
	uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
		u.PartSize = PartSize
		u.Concurrency = WriteConcurrency
	})

	// Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
	// the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
	// block, so there is no extra memory use to be concerned about. See
	// makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
	// calculating the Sha-256 because we don't need it; we already use md5sum
	// hashes that match the name of the block.
	_, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
		r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
	}))

	v.bucket.stats.TickOps("put")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
	v.bucket.stats.TickErr(err)

	return err
}
// Put writes a block.
func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
	return putWithPipe(ctx, loc, block, v)
}
// WriteBlock implements BlockWriter.
func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
	if v.volume.ReadOnly {
		return MethodDisabledError
	}

	r := NewCountingReader(rdr, v.bucket.stats.TickOutBytes)
	err := v.writeObject(ctx, loc, r)
	if err != nil {
		return err
	}
	return v.writeObject(ctx, "recent/"+loc, nil)
}
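// To summarize the keyspace scheme used by this driver: for each
// stored block X, the bucket holds the data object "X", a zero-byte
// marker "recent/X" whose Last-Modified time records the most recent
// put/touch (S3 offers no way to update an object's timestamp in
// place), and, while the block is trashed, a copy under "trash/X".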
type s3awsLister struct {
	Logger            logrus.FieldLogger
	Bucket            *s3AWSbucket
	Prefix            string
	PageSize          int
	Stats             *s3awsbucketStats
	ContinuationToken string
	buf               []s3.Object
	err               error
}
// First fetches the first page and returns the first item. It returns
// nil if the response is the empty set or an error occurs.
func (lister *s3awsLister) First() *s3.Object {
	lister.getPage()
	return lister.pop()
}
// Next returns the next item, fetching the next page if necessary. It
// returns nil if the last available item has already been fetched, or
// an error occurs.
func (lister *s3awsLister) Next() *s3.Object {
	if len(lister.buf) == 0 && lister.ContinuationToken != "" {
		lister.getPage()
	}
	return lister.pop()
}
// Error returns the most recent error encountered by First or Next.
func (lister *s3awsLister) Error() error {
	return lister.err
}
func (lister *s3awsLister) getPage() {
	lister.Stats.TickOps("list")
	lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)

	var input *s3.ListObjectsV2Input
	if lister.ContinuationToken == "" {
		input = &s3.ListObjectsV2Input{
			Bucket:  aws.String(lister.Bucket.bucket),
			MaxKeys: aws.Int64(int64(lister.PageSize)),
			Prefix:  aws.String(lister.Prefix),
		}
	} else {
		input = &s3.ListObjectsV2Input{
			Bucket:            aws.String(lister.Bucket.bucket),
			MaxKeys:           aws.Int64(int64(lister.PageSize)),
			Prefix:            aws.String(lister.Prefix),
			ContinuationToken: &lister.ContinuationToken,
		}
	}

	req := lister.Bucket.svc.ListObjectsV2Request(input)
	resp, err := req.Send(context.Background())
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			lister.err = aerr
		} else {
			lister.err = err
		}
		return
	}

	if *resp.IsTruncated {
		lister.ContinuationToken = *resp.NextContinuationToken
	} else {
		lister.ContinuationToken = ""
	}
	lister.buf = make([]s3.Object, 0, len(resp.Contents))
	for _, key := range resp.Contents {
		if !strings.HasPrefix(*key.Key, lister.Prefix) {
			lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
			continue
		}
		lister.buf = append(lister.buf, key)
	}
}
func (lister *s3awsLister) pop() (k *s3.Object) {
	if len(lister.buf) > 0 {
		k = &lister.buf[0]
		lister.buf = lister.buf[1:]
	}
	return
}
// IndexTo writes a complete list of locators with the given prefix
// for which Get() can retrieve data.
func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
	// Use a merge sort to find matching sets of X and recent/X.
	dataL := s3awsLister{
		Logger:   v.logger,
		Bucket:   v.bucket,
		Prefix:   prefix,
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	}
	recentL := s3awsLister{
		Logger:   v.logger,
		Bucket:   v.bucket,
		Prefix:   "recent/" + prefix,
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	}
	for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
		if *data.Key >= "g" {
			// Conveniently, "recent/*" and "trash/*" are
			// lexically greater than all hex-encoded data
			// hashes, so stopping here avoids iterating
			// over all of them needlessly with dataL.
			break
		}
		if !v.isKeepBlock(*data.Key) {
			continue
		}

		// stamp is the list entry we should use to report the
		// last-modified time for this data block: it will be
		// the recent/X entry if one exists, otherwise the
		// entry for the data block itself.
		stamp := data

		// Advance to the corresponding recent/X marker, if any
		for recent != nil && recentL.Error() == nil {
			if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
				recent = recentL.Next()
			} else if cmp == 0 {
				stamp = recent
				recent = recentL.Next()
				break
			} else {
				// recent/X marker is missing: we'll
				// use the timestamp on the data
				// object.
				break
			}
		}
		if err := recentL.Error(); err != nil {
			return err
		}

		// We truncate sub-second precision here. Otherwise
		// timestamps will never match the RFC1123-formatted
		// Last-Modified values parsed by Mtime().
		fmt.Fprintf(writer, "%s+%d %d\n", *data.Key, *data.Size, stamp.LastModified.Unix()*1000000000)
	}
	return dataL.Error()
}
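// The resulting index format is one line per block, "<locator>+<size>
// <timestamp in nanoseconds>". For example (hypothetical entry), the
// empty block stored at 2020-01-01T00:00:00Z would be listed as:
//
//	d41d8cd98f00b204e9800998ecf8427e+0 1577836800000000000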
// Mtime returns the stored timestamp for the given locator.
func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
	_, err := v.Head(loc)
	if err != nil {
		return s3AWSZeroTime, v.translateError(err)
	}
	resp, err := v.Head("recent/" + loc)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// The data object X exists, but recent/X is missing.
		err = v.writeObject(context.Background(), "recent/"+loc, nil)
		if err != nil {
			v.logger.WithError(err).Errorf("error creating %q", "recent/"+loc)
			return s3AWSZeroTime, v.translateError(err)
		}
		v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+loc)
		resp, err = v.Head("recent/" + loc)
		if err != nil {
			v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+loc)
			return s3AWSZeroTime, v.translateError(err)
		}
	} else if err != nil {
		// HEAD recent/X failed for some other reason.
		return s3AWSZeroTime, err
	}
	return *resp.LastModified, err
}
// Status returns a *VolumeStatus representing the current in-use
// storage capacity and a fake available capacity that doesn't make
// the volume seem full or nearly-full.
func (v *S3AWSVolume) Status() *VolumeStatus {
	return &VolumeStatus{
		DeviceNum: 1,
		BytesFree: BlockSize * 1000,
		BytesUsed: 1,
	}
}
// InternalStats returns bucket I/O and API call counters.
func (v *S3AWSVolume) InternalStats() interface{} {
	return &v.bucket.stats
}
// Touch sets the timestamp for the given locator to the current time.
func (v *S3AWSVolume) Touch(loc string) error {
	if v.volume.ReadOnly {
		return MethodDisabledError
	}
	_, err := v.Head(loc)
	err = v.translateError(err)
	if os.IsNotExist(err) && v.fixRace(loc) {
		// The data object got trashed in a race, but fixRace
		// rescued it.
	} else if err != nil {
		return err
	}
	err = v.writeObject(context.Background(), "recent/"+loc, nil)
	return v.translateError(err)
}
// checkRaceWindow returns a non-nil error if trash/loc is, or might
// be, in the race window (i.e., it's not safe to trash loc).
func (v *S3AWSVolume) checkRaceWindow(loc string) error {
	resp, err := v.Head("trash/" + loc)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// OK, trash/X doesn't exist so we're not in the race
		// window.
		return nil
	} else if err != nil {
		// Error looking up trash/X. We don't know whether
		// we're in the race window.
		return err
	}
	t := resp.LastModified
	safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
	if safeWindow <= 0 {
		// We can't count on "touch trash/X" to prolong
		// trash/X's lifetime. The new timestamp might not
		// become visible until now+raceWindow, and EmptyTrash
		// is allowed to delete trash/X before then.
		return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)
	}
	// trash/X exists, but it won't be eligible for deletion until
	// after now+raceWindow, so it's safe to overwrite it.
	return nil
}
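// Worked example (hypothetical values): if trash/X was last modified
// yesterday at 10:00, BlobTrashLifetime is 24h, RaceWindow is 1h, and
// it is now 09:30 today, then safeWindow = (10:00 today) - (10:30
// today) = -30m, so checkRaceWindow reports that the safe window
// ended 30m ago and Trash refuses to overwrite trash/X.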
// Del deletes the given object from the bucket.
func (b *s3AWSbucket) Del(path string) error {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(path),
	}
	req := b.svc.DeleteObjectRequest(input)
	_, err := req.Send(context.Background())
	b.stats.TickOps("delete")
	b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
	b.stats.TickErr(err)
	return err
}
// Trash a Keep block.
func (v *S3AWSVolume) Trash(loc string) error {
	if v.volume.ReadOnly {
		return MethodDisabledError
	}
	if t, err := v.Mtime(loc); err != nil {
		return err
	} else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
		return nil
	}
	if v.cluster.Collections.BlobTrashLifetime == 0 {
		if !v.UnsafeDelete {
			return ErrS3TrashDisabled
		}
		return v.translateError(v.bucket.Del(loc))
	}
	err := v.checkRaceWindow(loc)
	if err != nil {
		return err
	}
	err = v.safeCopy("trash/"+loc, loc)
	if err != nil {
		return err
	}
	return v.translateError(v.bucket.Del(loc))
}
// Untrash moves a block from trash back into the data store.
func (v *S3AWSVolume) Untrash(loc string) error {
	err := v.safeCopy(loc, "trash/"+loc)
	if err != nil {
		return err
	}
	err = v.writeObject(context.Background(), "recent/"+loc, nil)
	return v.translateError(err)
}
type s3awsbucketStats struct {
	statsTicker
	Ops     uint64
	GetOps  uint64
	PutOps  uint64
	HeadOps uint64
	DelOps  uint64
	ListOps uint64
}
func (s *s3awsbucketStats) TickErr(err error) {
	if err == nil {
		return
	}
	errType := fmt.Sprintf("%T", err)
	if aerr, ok := err.(awserr.Error); ok {
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// A service error occurred
			errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
		} else {
			errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
		}
	}
	s.statsTicker.TickErr(err, errType)
}
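// For example (the exact strings depend on the SDK's concrete error
// types, so these are illustrative): a HEAD on a missing key might be
// counted under an error label like "*awserr.requestError 404
// NotFound", while a connection-level failure that never produced an
// HTTP response gets the "000" placeholder in place of a status code.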