// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"regexp"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/awserr"
	"github.com/aws/aws-sdk-go-v2/aws/defaults"
	"github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
	"github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
	"github.com/aws/aws-sdk-go-v2/aws/endpoints"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
)

// S3AWSVolume implements Volume using an S3 bucket.
type S3AWSVolume struct {
	arvados.S3VolumeDriverParameters
	AuthToken      string    // populated automatically when IAMRole is used
	AuthExpiration time.Time // populated automatically when IAMRole is used

	cluster *arvados.Cluster
	volume  arvados.Volume
	logger  logrus.FieldLogger
	metrics *volumeMetricsVecs
	bucket  *s3AWSbucket
}

// s3bucket wraps s3.bucket and counts I/O and API usage stats. The
// wrapped bucket can be replaced atomically with SetBucket in order
// to update credentials.
type s3AWSbucket struct {
	bucket string
	svc    *s3.Client
	stats  s3awsbucketStats
	mu     sync.Mutex
}

// chooseS3VolumeDriver distinguishes between the old goamz driver and
// aws-sdk-go, based on the UseAWSS3v2Driver feature flag.
func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
	v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
	err := json.Unmarshal(volume.DriverParameters, v)
	if err != nil {
		return nil, err
	}
	if v.UseAWSS3v2Driver {
		logger.Debugln("Using AWS S3 v2 driver")
		return newS3AWSVolume(cluster, volume, logger, metrics)
	}
	logger.Debugln("Using goamz S3 driver")
	return newS3Volume(cluster, volume, logger, metrics)
}

const (
	PartSize         = 5 * 1024 * 1024
	ReadConcurrency  = 13
	WriteConcurrency = 5
)

var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
var s3AWSZeroTime time.Time
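
// isKeepBlock reports whether s is the key of a Keep block. If a
// nonzero PrefixLength is configured, the "abc/" prefix is stripped
// first, and the bare locator is returned along with the result.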
func (v *S3AWSVolume) isKeepBlock(s string) (string, bool) {
	if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
		s = s[v.PrefixLength+1:]
	}
	return s, s3AWSKeepBlockRegexp.MatchString(s)
}

// Return the key used for a given loc. If PrefixLength==0 then
// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
// "abc/abcdef0123", etc.
func (v *S3AWSVolume) key(loc string) string {
	if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
		return loc[:v.PrefixLength] + "/" + loc
	}
	return loc
}

func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
	v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
	err := json.Unmarshal(volume.DriverParameters, v)
	if err != nil {
		return nil, err
	}
	v.logger = logger.WithField("Volume", v.String())
	return v, v.check("")
}
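
// translateError converts aws-sdk-go-v2 "not found" error codes to
// os.ErrNotExist; all other errors are passed through unchanged.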
func (v *S3AWSVolume) translateError(err error) error {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "NotFound":
			return os.ErrNotExist
		case "NoSuchKey":
			return os.ErrNotExist
		}
	}
	return err
}

// safeCopy calls CopyObjectRequest, and checks the response to make
// sure the copy succeeded and updated the timestamp on the
// destination object.
//
// (If something goes wrong during the copy, the error will be
// embedded in the 200 OK response.)
func (v *S3AWSVolume) safeCopy(dst, src string) error {
	input := &s3.CopyObjectInput{
		Bucket:      aws.String(v.bucket.bucket),
		ContentType: aws.String("application/octet-stream"),
		CopySource:  aws.String(v.bucket.bucket + "/" + src),
		Key:         aws.String(dst),
	}

	req := v.bucket.svc.CopyObjectRequest(input)
	resp, err := req.Send(context.Background())

	err = v.translateError(err)
	if os.IsNotExist(err) {
		return err
	} else if err != nil {
		return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err)
	}

	if resp.CopyObjectResult.LastModified == nil {
		return fmt.Errorf("PutCopy(%q ← %q) succeeded but did not return a timestamp", dst, v.bucket.bucket+"/"+src)
	} else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew {
		return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %s", resp.CopyObjectResult.LastModified)
	}
	return nil
}
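
// check validates the volume configuration, fills in defaults, and
// sets up the S3 client, credentials chain, and prometheus counters.
// A non-empty ec2metadataHostname overrides the EC2 instance metadata
// endpoint.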
func (v *S3AWSVolume) check(ec2metadataHostname string) error {
	if v.Bucket == "" {
		return errors.New("DriverParameters: Bucket must be provided")
	}
	if v.IndexPageSize == 0 {
		v.IndexPageSize = 1000
	}
	if v.RaceWindow < 0 {
		return errors.New("DriverParameters: RaceWindow must not be negative")
	}

	if v.V2Signature {
		return errors.New("DriverParameters: V2Signature is not supported")
	}

	defaultResolver := endpoints.NewDefaultResolver()

	cfg := defaults.Config()

	if v.Endpoint == "" && v.Region == "" {
		return fmt.Errorf("AWS region or endpoint must be specified")
	} else if v.Endpoint != "" || ec2metadataHostname != "" {
		myCustomResolver := func(service, region string) (aws.Endpoint, error) {
			if v.Endpoint != "" && service == "s3" {
				return aws.Endpoint{
					URL:           v.Endpoint,
					SigningRegion: v.Region,
				}, nil
			} else if service == "ec2metadata" && ec2metadataHostname != "" {
				return aws.Endpoint{
					URL: ec2metadataHostname,
				}, nil
			}
			return defaultResolver.ResolveEndpoint(service, region)
		}
		cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
	}

	cfg.Region = v.Region

	// Zero timeouts mean "wait forever", which is a bad
	// default. Default to long timeouts instead.
	if v.ConnectTimeout == 0 {
		v.ConnectTimeout = s3DefaultConnectTimeout
	}
	if v.ReadTimeout == 0 {
		v.ReadTimeout = s3DefaultReadTimeout
	}

	creds := aws.NewChainProvider(
		[]aws.CredentialsProvider{
			aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken),
			ec2rolecreds.New(ec2metadata.New(cfg)),
		})
	cfg.Credentials = creds

	v.bucket = &s3AWSbucket{
		bucket: v.Bucket,
		svc:    s3.New(cfg),
	}

	// Set up prometheus metrics
	lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
	v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)

	return nil
}

// String implements fmt.Stringer.
func (v *S3AWSVolume) String() string {
	return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
}

// GetDeviceID returns a globally unique ID for the storage bucket.
func (v *S3AWSVolume) GetDeviceID() string {
	return "s3://" + v.Endpoint + "/" + v.Bucket
}

// Compare the given data with the stored data.
func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) error {
	key := v.key(loc)
	errChan := make(chan error, 1)
	go func() {
		_, err := v.head("recent/" + key)
		errChan <- err
	}()
	var err error
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err = <-errChan:
	}
	if err != nil {
		// Checking for the key itself here would interfere
		// with future GET requests.
		//
		// On AWS, if X doesn't exist, a HEAD or GET request
		// for X causes X's non-existence to be cached. Thus,
		// if we test for X, then create X and return a
		// signature to our client, the client might still get
		// 404 from all keepstores when trying to read it.
		//
		// To avoid this, we avoid doing HEAD X or GET X until
		// we know X has been written.
		//
		// Note that X might exist even though recent/X
		// doesn't: for example, the response to HEAD recent/X
		// might itself come from a stale cache. In such
		// cases, we will return a false negative and
		// PutHandler might needlessly create another replica
		// on a different volume. That's not ideal, but it's
		// better than passing the eventually-consistent
		// problem on to our clients.
		return v.translateError(err)
	}

	input := &s3.GetObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
	}

	req := v.bucket.svc.GetObjectRequest(input)
	result, err := req.Send(ctx)
	if err != nil {
		return v.translateError(err)
	}
	return v.translateError(compareReaderWithBuf(ctx, result.Body, expect, loc[:32]))
}

// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
// and deletes them from the volume.
func (v *S3AWSVolume) EmptyTrash() {
	if v.cluster.Collections.BlobDeleteConcurrency < 1 {
		return
	}

	var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64

	// Define "ready to delete" as "...when EmptyTrash started".
	startT := time.Now()

	emptyOneKey := func(trash *s3.Object) {
		key := strings.TrimPrefix(*trash.Key, "trash/")
		loc, isblk := v.isKeepBlock(key)
		if !isblk {
			return
		}
		atomic.AddInt64(&bytesInTrash, *trash.Size)
		atomic.AddInt64(&blocksInTrash, 1)

		trashT := *trash.LastModified
		recent, err := v.head("recent/" + key)
		if err != nil && os.IsNotExist(v.translateError(err)) {
			v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
			err = v.Untrash(loc)
			if err != nil {
				v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
			}
			return
		} else if err != nil {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
			return
		}

		if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
			if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
				// recent/key is too old to protect
				// loc from being Trashed again during
				// the raceWindow that starts if we
				// delete trash/X now.
				//
				// Note this means (TrashSweepInterval
				// < BlobSigningTTL - raceWindow) is
				// necessary to avoid starvation.
				v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
				v.fixRace(key)
				v.Touch(loc)
				return
			}
			_, err := v.head(key)
			if os.IsNotExist(err) {
				v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
				v.fixRace(key)
				return
			} else if err != nil {
				v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
				return
			}
		}
		if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
			return
		}
		err = v.bucket.Del(*trash.Key)
		if err != nil {
			v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
			return
		}
		atomic.AddInt64(&bytesDeleted, *trash.Size)
		atomic.AddInt64(&blocksDeleted, 1)

		_, err = v.head(*trash.Key)
		if err == nil {
			v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
			return
		}
		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", key)
			return
		}
		err = v.bucket.Del("recent/" + key)
		if err != nil {
			v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+key)
		}
	}

	var wg sync.WaitGroup
	todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
	for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range todo {
				emptyOneKey(key)
			}
		}()
	}

	trashL := s3awsLister{
		Logger:   v.logger,
		Bucket:   v.bucket,
		Prefix:   "trash/",
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	}
	for trash := trashL.First(); trash != nil; trash = trashL.Next() {
		todo <- trash
	}
	close(todo)
	wg.Wait()

	if err := trashL.Error(); err != nil {
		v.logger.WithError(err).Error("EmptyTrash: lister failed")
	}
	v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}

// fixRace(X) is called when "recent/X" exists but "X" doesn't
// exist. If the timestamps on "recent/X" and "trash/X" indicate there
// was a race between Put and Trash, fixRace recovers from the race by
// Untrashing the block.
func (v *S3AWSVolume) fixRace(key string) bool {
	trash, err := v.head("trash/" + key)
	if err != nil {
		if !os.IsNotExist(v.translateError(err)) {
			v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
		}
		return false
	}

	recent, err := v.head("recent/" + key)
	if err != nil {
		v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
		return false
	}

	recentTime := *recent.LastModified
	trashTime := *trash.LastModified
	ageWhenTrashed := trashTime.Sub(recentTime)
	if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
		// No evidence of a race: block hasn't been written
		// since it became eligible for Trash. No fix needed.
		return false
	}

	v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
	v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
	err = v.safeCopy(key, "trash/"+key)
	if err != nil {
		v.logger.WithError(err).Error("fixRace: copy failed")
		return false
	}
	return true
}
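
// head performs a HEAD request for key and returns the response,
// ticking the bucket's HEAD-operation and error counters.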
func (v *S3AWSVolume) head(key string) (result *s3.HeadObjectOutput, err error) {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
	}

	req := v.bucket.svc.HeadObjectRequest(input)
	res, err := req.Send(context.TODO())

	v.bucket.stats.TickOps("head")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
	v.bucket.stats.TickErr(err)

	if err != nil {
		return nil, v.translateError(err)
	}
	result = res.HeadObjectOutput
	return
}

// Get a block: copy the block data into buf, and return the number of
// bytes copied.
func (v *S3AWSVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
	// Do not use getWithPipe here: the BlockReader interface does not pass
	// through 'buf []byte', and we don't want to allocate two buffers for each
	// read request. Instead, use a version of ReadBlock that accepts 'buf []byte'
	// and uses aws.WriteAtBuffer to avoid the extra allocation.
	return v.ReadBlock(ctx, loc, buf)
}

func (v *S3AWSVolume) ReadBlock(ctx context.Context, loc string, buf []byte) (int, error) {
	key := v.key(loc)
	count, err := v.readWorker(ctx, key, buf)
	if err == nil {
		v.bucket.stats.TickInBytes(uint64(count))
		return count, err
	}

	err = v.translateError(err)
	if !os.IsNotExist(err) {
		return 0, err
	}

	_, err = v.head("recent/" + key)
	err = v.translateError(err)
	if err != nil {
		// If we can't read recent/X, there's no point in
		// trying fixRace. Give up.
		return 0, err
	}
	if !v.fixRace(key) {
		return 0, os.ErrNotExist
	}

	count, err = v.readWorker(ctx, key, buf)
	if err != nil {
		v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
		err = v.translateError(err)
		return 0, err
	}
	v.bucket.stats.TickInBytes(uint64(count))
	return count, err
}
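
// readWorker fetches the object at key into buf using the s3manager
// concurrent downloader, and returns the number of bytes copied.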
func (v *S3AWSVolume) readWorker(ctx context.Context, key string, buf []byte) (int, error) {
	awsBuf := aws.NewWriteAtBuffer(buf)
	downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
		u.PartSize = PartSize
		u.Concurrency = ReadConcurrency
	})

	v.logger.Debugf("Partsize: %d; Concurrency: %d\n", downloader.PartSize, downloader.Concurrency)

	count, err := downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
	})
	v.bucket.stats.TickOps("get")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
	v.bucket.stats.TickErr(err)
	if err != nil {
		return 0, v.translateError(err)
	}
	return int(count), err
}
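
// writeObject uploads the data in r to the given key using the
// s3manager concurrent uploader. For Keep block keys, ContentMD5 is
// set from the locator so the server verifies the payload hash.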
func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader) error {
	if r == nil {
		// r == nil leads to a memory violation in func readFillBuf in
		// aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
		r = bytes.NewReader(nil)
	}

	uploadInput := s3manager.UploadInput{
		Bucket: aws.String(v.bucket.bucket),
		Key:    aws.String(key),
		Body:   r,
	}

	if loc, ok := v.isKeepBlock(key); ok {
		var contentMD5 string
		md5, err := hex.DecodeString(loc)
		if err != nil {
			return err
		}
		contentMD5 = base64.StdEncoding.EncodeToString(md5)
		uploadInput.ContentMD5 = &contentMD5
	}

	// Experimentation indicated that using concurrency 5 yields the best
	// throughput, better than higher concurrency (10 or 13) by ~5%.
	// Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
	// is detrimental to throughput (minus ~15%).
	uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
		u.PartSize = PartSize
		u.Concurrency = WriteConcurrency
	})

	// Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
	// the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
	// block, so there is no extra memory use to be concerned about. See
	// makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
	// calculating the Sha-256 because we don't need it; we already use md5sum
	// hashes that match the name of the block.
	_, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
		r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
	}))

	v.bucket.stats.TickOps("put")
	v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
	v.bucket.stats.TickErr(err)

	return err
}

// Put writes a block.
func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
	// Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
	// sdk to avoid memory allocation there. See #17339 for more information.
	return v.WriteBlock(ctx, loc, bytes.NewReader(block))
}

// WriteBlock implements BlockWriter.
func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
	if v.volume.ReadOnly {
		return MethodDisabledError
	}

	r := NewCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
	key := v.key(loc)
	err := v.writeObject(ctx, key, r)
	if err != nil {
		return err
	}
	return v.writeObject(ctx, "recent/"+key, nil)
}
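
// s3awsLister pages through the keys in a bucket matching a given
// prefix, using ListObjectsV2 continuation tokens to fetch one page
// at a time.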
type s3awsLister struct {
	Logger            logrus.FieldLogger
	Bucket            *s3AWSbucket
	Prefix            string
	PageSize          int
	Stats             *s3awsbucketStats
	ContinuationToken string
	buf               []s3.Object
	err               error
}

// First fetches the first page and returns the first item. It returns
// nil if the response is the empty set or an error occurs.
func (lister *s3awsLister) First() *s3.Object {
	lister.getPage()
	return lister.pop()
}

// Next returns the next item, fetching the next page if necessary. It
// returns nil if the last available item has already been fetched, or
// an error occurs.
func (lister *s3awsLister) Next() *s3.Object {
	if len(lister.buf) == 0 && lister.ContinuationToken != "" {
		lister.getPage()
	}
	return lister.pop()
}

// Return the most recent error encountered by First or Next.
func (lister *s3awsLister) Error() error {
	return lister.err
}
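
// getPage fetches the next page of results into lister.buf and
// updates ContinuationToken for the following call.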
func (lister *s3awsLister) getPage() {
	lister.Stats.TickOps("list")
	lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)

	var input *s3.ListObjectsV2Input
	if lister.ContinuationToken == "" {
		input = &s3.ListObjectsV2Input{
			Bucket:  aws.String(lister.Bucket.bucket),
			MaxKeys: aws.Int64(int64(lister.PageSize)),
			Prefix:  aws.String(lister.Prefix),
		}
	} else {
		input = &s3.ListObjectsV2Input{
			Bucket:            aws.String(lister.Bucket.bucket),
			MaxKeys:           aws.Int64(int64(lister.PageSize)),
			Prefix:            aws.String(lister.Prefix),
			ContinuationToken: &lister.ContinuationToken,
		}
	}

	req := lister.Bucket.svc.ListObjectsV2Request(input)
	resp, err := req.Send(context.Background())
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			lister.err = aerr
		} else {
			lister.err = err
		}
		return
	}

	if *resp.IsTruncated {
		lister.ContinuationToken = *resp.NextContinuationToken
	} else {
		lister.ContinuationToken = ""
	}
	lister.buf = make([]s3.Object, 0, len(resp.Contents))
	for _, key := range resp.Contents {
		if !strings.HasPrefix(*key.Key, lister.Prefix) {
			lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
			continue
		}
		lister.buf = append(lister.buf, key)
	}
}
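
// pop removes the first buffered item, if any, and returns it.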
func (lister *s3awsLister) pop() (k *s3.Object) {
	if len(lister.buf) > 0 {
		k = &lister.buf[0]
		lister.buf = lister.buf[1:]
	}
	return
}

// IndexTo writes a complete list of locators with the given prefix
// for which Get() can retrieve data.
func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
	prefix = v.key(prefix)
	// Use a merge sort to find matching sets of X and recent/X.
	dataL := s3awsLister{
		Logger:   v.logger,
		Bucket:   v.bucket,
		Prefix:   prefix,
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	}
	recentL := s3awsLister{
		Logger:   v.logger,
		Bucket:   v.bucket,
		Prefix:   "recent/" + prefix,
		PageSize: v.IndexPageSize,
		Stats:    &v.bucket.stats,
	}
	for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
		if *data.Key >= "g" {
			// Conveniently, "recent/*" and "trash/*" are
			// lexically greater than all hex-encoded data
			// hashes, so stopping here avoids iterating
			// over all of them needlessly with dataL.
			break
		}
		loc, isblk := v.isKeepBlock(*data.Key)
		if !isblk {
			continue
		}

		// stamp is the list entry we should use to report the
		// last-modified time for this data block: it will be
		// the recent/X entry if one exists, otherwise the
		// entry for the data block itself.
		stamp := data

		// Advance to the corresponding recent/X marker, if any
		for recent != nil && recentL.Error() == nil {
			if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
				recent = recentL.Next()
				continue
			} else if cmp == 0 {
				stamp = recent
				recent = recentL.Next()
				break
			} else {
				// recent/X marker is missing: we'll
				// use the timestamp on the data
				// object.
				break
			}
		}
		if err := recentL.Error(); err != nil {
			return err
		}
		// We truncate sub-second precision here. Otherwise
		// timestamps will never match the RFC1123-formatted
		// Last-Modified values parsed by Mtime().
		fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000)
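		// For example, a 3-byte block with locator
		// "acbd18db4cc2f85cedef654fccc4a4d8" last modified at
		// Unix time 1600000000 would produce the line
		// (hypothetical values):
		// "acbd18db4cc2f85cedef654fccc4a4d8+3 1600000000000000000"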
	}
	return dataL.Error()
}

// Mtime returns the stored timestamp for the given locator.
func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
	key := v.key(loc)
	_, err := v.head(key)
	if err != nil {
		return s3AWSZeroTime, v.translateError(err)
	}
	resp, err := v.head("recent/" + key)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// The data object X exists, but recent/X is missing.
		err = v.writeObject(context.Background(), "recent/"+key, nil)
		if err != nil {
			v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
			return s3AWSZeroTime, v.translateError(err)
		}
		v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key)
		resp, err = v.head("recent/" + key)
		if err != nil {
			v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
			return s3AWSZeroTime, v.translateError(err)
		}
	} else if err != nil {
		// HEAD recent/X failed for some other reason.
		return s3AWSZeroTime, err
	}
	return *resp.LastModified, err
}

// Status returns a *VolumeStatus representing the current in-use
// storage capacity and a fake available capacity that doesn't make
// the volume seem full or nearly-full.
func (v *S3AWSVolume) Status() *VolumeStatus {
	return &VolumeStatus{
		DeviceNum: 1,
		BytesFree: BlockSize * 1000,
		BytesUsed: 1,
	}
}

// InternalStats returns bucket I/O and API call counters.
func (v *S3AWSVolume) InternalStats() interface{} {
	return &v.bucket.stats
}

// Touch sets the timestamp for the given locator to the current time.
func (v *S3AWSVolume) Touch(loc string) error {
	if v.volume.ReadOnly {
		return MethodDisabledError
	}
	key := v.key(loc)
	_, err := v.head(key)
	err = v.translateError(err)
	if os.IsNotExist(err) && v.fixRace(key) {
		// The data object got trashed in a race, but fixRace
		// rescued it.
	} else if err != nil {
		return err
	}
	err = v.writeObject(context.Background(), "recent/"+key, nil)
	return v.translateError(err)
}

// checkRaceWindow returns a non-nil error if trash/key is, or might
// be, in the race window (i.e., it's not safe to trash key).
func (v *S3AWSVolume) checkRaceWindow(key string) error {
	resp, err := v.head("trash/" + key)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// OK, trash/X doesn't exist so we're not in the race
		// window.
		return nil
	} else if err != nil {
		// Error looking up trash/X. We don't know whether
		// we're in the race window.
		return err
	}
	t := resp.LastModified
	safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
	if safeWindow <= 0 {
		// We can't count on "touch trash/X" to prolong
		// trash/X's lifetime. The new timestamp might not
		// become visible until now+raceWindow, and EmptyTrash
		// is allowed to delete trash/X before then.
		return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
	}
	// trash/X exists, but it won't be eligible for deletion until
	// after now+raceWindow, so it's safe to overwrite it.
	return nil
}
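
// Del removes the object at the given path from the bucket, ticking
// the delete-operation and error counters.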
func (b *s3AWSbucket) Del(path string) error {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(path),
	}
	req := b.svc.DeleteObjectRequest(input)
	_, err := req.Send(context.Background())
	b.stats.TickOps("delete")
	b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
	b.stats.TickErr(err)
	return err
}

// Trash a Keep block.
func (v *S3AWSVolume) Trash(loc string) error {
	if v.volume.ReadOnly {
		return MethodDisabledError
	}
	if t, err := v.Mtime(loc); err != nil {
		return err
	} else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
		return nil
	}
	key := v.key(loc)
	if v.cluster.Collections.BlobTrashLifetime == 0 {
		if !v.UnsafeDelete {
			return ErrS3TrashDisabled
		}
		return v.translateError(v.bucket.Del(key))
	}
	err := v.checkRaceWindow(key)
	if err != nil {
		return err
	}
	err = v.safeCopy("trash/"+key, key)
	if err != nil {
		return err
	}
	return v.translateError(v.bucket.Del(key))
}

// Untrash moves block from trash back into store.
func (v *S3AWSVolume) Untrash(loc string) error {
	key := v.key(loc)
	err := v.safeCopy(key, "trash/"+key)
	if err != nil {
		return err
	}
	err = v.writeObject(context.Background(), "recent/"+key, nil)
	return v.translateError(err)
}
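
// s3awsbucketStats counts I/O and API usage for a bucket: it embeds
// statsTicker and tallies API calls by operation type.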
type s3awsbucketStats struct {
	statsTicker
	Ops     uint64
	GetOps  uint64
	PutOps  uint64
	HeadOps uint64
	DelOps  uint64
	ListOps uint64
}
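
// TickErr classifies err and increments the matching error counter;
// for AWS errors the type label includes the HTTP status code (or
// "000" if the failure happened before a response was received) and
// the AWS error code.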
func (s *s3awsbucketStats) TickErr(err error) {
	if err == nil {
		return
	}
	errType := fmt.Sprintf("%T", err)
	if aerr, ok := err.(awserr.Error); ok {
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// A service error occurred
			errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
		} else {
			errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
		}
	}
	s.statsTicker.TickErr(err, errType)
}