1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
23 "git.arvados.org/arvados.git/sdk/go/arvados"
24 "github.com/aws/aws-sdk-go-v2/aws"
25 "github.com/aws/aws-sdk-go-v2/aws/awserr"
26 "github.com/aws/aws-sdk-go-v2/aws/defaults"
27 "github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
28 "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
29 "github.com/aws/aws-sdk-go-v2/aws/endpoints"
30 "github.com/aws/aws-sdk-go-v2/service/s3"
31 "github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
32 "github.com/prometheus/client_golang/prometheus"
33 "github.com/sirupsen/logrus"
// S3AWSVolume implements Volume using an S3 bucket.
37 type S3AWSVolume struct {
38 arvados.S3VolumeDriverParameters
39 AuthToken string // populated automatically when IAMRole is used
40 AuthExpiration time.Time // populated automatically when IAMRole is used
42 cluster *arvados.Cluster
44 logger logrus.FieldLogger
45 metrics *volumeMetricsVecs
// s3AWSbucket wraps an S3 bucket and counts I/O and API usage stats. The
52 // wrapped bucket can be replaced atomically with SetBucket in order
53 // to update credentials.
54 type s3AWSbucket struct {
57 stats s3awsbucketStats
// chooseS3VolumeDriver distinguishes between the old goamz driver and the
// new aws-sdk-go-v2 driver, based on the AlternateDriver feature flag.
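// Illustrative (hypothetical) DriverParameters JSON selecting the alternate
// driver; the other fields are the usual S3 volume parameters:
//
//   {"Bucket": "example-keep-bucket", "Region": "us-east-1", "AlternateDriver": true}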
63 func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
64 v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
65 err := json.Unmarshal(volume.DriverParameters, &v)
69 if v.AlternateDriver {
70 logger.Debugln("Using alternate S3 driver (aws-go)")
71 return newS3AWSVolume(cluster, volume, logger, metrics)
73 logger.Debugln("Using standard S3 driver (goamz)")
74 return newS3Volume(cluster, volume, logger, metrics)
78 var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
79 var s3AWSZeroTime time.Time
81 func (v *S3AWSVolume) isKeepBlock(s string) bool {
82 return s3AWSKeepBlockRegexp.MatchString(s)
85 func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
86 logger.Debugln("in newS3AWSVolume")
87 v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
88 err := json.Unmarshal(volume.DriverParameters, &v)
92 v.logger = logger.WithField("Volume", v.String())
93 v.logger.Debugln("in newS3AWSVolume after volume set")
97 func (v *S3AWSVolume) translateError(err error) error {
98 if aerr, ok := err.(awserr.Error); ok {
101 return os.ErrNotExist
103 return os.ErrNotExist
109 // safeCopy calls CopyObjectRequest, and checks the response to make sure the
110 // copy succeeded and updated the timestamp on the destination object
// (If something goes wrong during the copy, the error will be embedded in the 200 OK response.)
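// safeCopy is used by Trash, Untrash, and fixRace to copy a block between
// "X" and "trash/X" while refreshing the destination object's timestamp.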
114 func (v *S3AWSVolume) safeCopy(dst, src string) error {
115 input := &s3.CopyObjectInput{
116 Bucket: aws.String(v.bucket.bucket),
117 ContentType: aws.String("application/octet-stream"),
118 CopySource: aws.String(v.bucket.bucket + "/" + src),
119 Key: aws.String(dst),
122 req := v.bucket.svc.CopyObjectRequest(input)
123 resp, err := req.Send(context.Background())
125 err = v.translateError(err)
126 if os.IsNotExist(err) {
128 } else if err != nil {
129 return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err)
if resp.CopyObjectResult.LastModified == nil {
return fmt.Errorf("PutCopy(%q ← %q) succeeded but did not return a timestamp", dst, v.bucket.bucket+"/"+src)
} else if time.Since(*resp.CopyObjectResult.LastModified) > maxClockSkew {
return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %s", resp.CopyObjectResult.LastModified)
140 func (v *S3AWSVolume) check(ec2metadataHostname string) error {
142 return errors.New("DriverParameters: Bucket must be provided")
144 if v.IndexPageSize == 0 {
145 v.IndexPageSize = 1000
147 if v.RaceWindow < 0 {
148 return errors.New("DriverParameters: RaceWindow must not be negative")
151 defaultResolver := endpoints.NewDefaultResolver()
153 cfg := defaults.Config()
155 if v.Endpoint == "" && v.Region == "" {
156 return fmt.Errorf("AWS region or endpoint must be specified")
157 } else if v.Endpoint != "" || ec2metadataHostname != "" {
158 myCustomResolver := func(service, region string) (aws.Endpoint, error) {
159 if v.Endpoint != "" && service == "s3" {
162 SigningRegion: v.Region,
164 } else if service == "ec2metadata" && ec2metadataHostname != "" {
166 URL: ec2metadataHostname,
170 return defaultResolver.ResolveEndpoint(service, region)
172 cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver)
175 cfg.Region = v.Region
177 // Zero timeouts mean "wait forever", which is a bad
178 // default. Default to long timeouts instead.
179 if v.ConnectTimeout == 0 {
180 v.ConnectTimeout = s3DefaultConnectTimeout
182 if v.ReadTimeout == 0 {
183 v.ReadTimeout = s3DefaultReadTimeout
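// Build a credentials chain: the static AccessKey/SecretKey from the volume
// configuration is tried first, falling back to EC2 instance-role
// credentials from the metadata service when no usable static keys are set.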
186 creds := aws.NewChainProvider(
187 []aws.CredentialsProvider{
188 aws.NewStaticCredentialsProvider(v.AccessKey, v.SecretKey, v.AuthToken),
189 ec2rolecreds.New(ec2metadata.New(cfg)),
192 cfg.Credentials = creds
194 v.bucket = &s3AWSbucket{
199 // Set up prometheus metrics
200 lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
201 v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
206 // String implements fmt.Stringer.
207 func (v *S3AWSVolume) String() string {
208 return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
211 // GetDeviceID returns a globally unique ID for the storage bucket.
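// The ID has the form "s3://<Endpoint>/<Bucket>", e.g. (hypothetical)
// "s3://s3.example.com/example-keep-bucket".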
212 func (v *S3AWSVolume) GetDeviceID() string {
213 return "s3://" + v.Endpoint + "/" + v.Bucket
216 // Compare the given data with the stored data.
217 func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) error {
218 errChan := make(chan error, 1)
220 _, err := v.Head("recent/" + loc)
227 case err = <-errChan:
230 // Checking for "loc" itself here would interfere with
231 // future GET requests.
233 // On AWS, if X doesn't exist, a HEAD or GET request
234 // for X causes X's non-existence to be cached. Thus,
235 // if we test for X, then create X and return a
236 // signature to our client, the client might still get
237 // 404 from all keepstores when trying to read it.
239 // To avoid this, we avoid doing HEAD X or GET X until
240 // we know X has been written.
242 // Note that X might exist even though recent/X
243 // doesn't: for example, the response to HEAD recent/X
244 // might itself come from a stale cache. In such
245 // cases, we will return a false negative and
246 // PutHandler might needlessly create another replica
247 // on a different volume. That's not ideal, but it's
248 // better than passing the eventually-consistent
249 // problem on to our clients.
250 return v.translateError(err)
253 input := &s3.GetObjectInput{
254 Bucket: aws.String(v.bucket.bucket),
255 Key: aws.String(loc),
258 req := v.bucket.svc.GetObjectRequest(input)
259 result, err := req.Send(ctx)
261 return v.translateError(err)
263 return v.translateError(compareReaderWithBuf(ctx, result.Body, expect, loc[:32]))
266 // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
267 // and deletes them from the volume.
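// Trash markers are processed in parallel by BlobDeleteConcurrency worker
// goroutines.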
268 func (v *S3AWSVolume) EmptyTrash() {
269 if v.cluster.Collections.BlobDeleteConcurrency < 1 {
273 var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
275 // Define "ready to delete" as "...when EmptyTrash started".
278 emptyOneKey := func(trash *s3.Object) {
v.logger.Debugf("EmptyTrash: checking trash marker %s (last modified %s)", *trash.Key, *trash.LastModified)
280 loc := strings.TrimPrefix(*trash.Key, "trash/")
281 if !v.isKeepBlock(loc) {
284 atomic.AddInt64(&bytesInTrash, *trash.Size)
285 atomic.AddInt64(&blocksInTrash, 1)
trashT := *trash.LastModified
289 recent, err := v.Head("recent/" + loc)
290 if err != nil && os.IsNotExist(v.translateError(err)) {
v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+loc, err)
294 v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
297 } else if err != nil {
298 v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+loc)
if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
// recent/X was modified within BlobSigningTTL of the trash
// marker, so a Put may have raced with Trash.
304 if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
305 // recent/loc is too old to protect
306 // loc from being Trashed again during
307 // the raceWindow that starts if we
308 // delete trash/X now.
310 // Note this means (TrashSweepInterval
311 // < BlobSigningTTL - raceWindow) is
312 // necessary to avoid starvation.
313 v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
318 _, err := v.Head(loc)
319 if os.IsNotExist(err) {
320 v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
323 } else if err != nil {
324 v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
328 if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
v.logger.Debugf("EmptyTrash: %s is not old enough to delete yet (age %s < BlobTrashLifetime %s)", *trash.Key, startT.Sub(trashT), v.cluster.Collections.BlobTrashLifetime.Duration())
332 err = v.bucket.Del(*trash.Key)
334 v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key)
337 atomic.AddInt64(&bytesDeleted, *trash.Size)
338 atomic.AddInt64(&blocksDeleted, 1)
v.logger.Debugf("EmptyTrash: deleted trash marker %s", *trash.Key)
343 v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
346 if !os.IsNotExist(v.translateError(err)) {
347 v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
350 err = v.bucket.Del("recent/" + loc)
352 v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+loc)
v.logger.Debugf("EmptyTrash: deleted recent/%s", loc)
357 var wg sync.WaitGroup
358 todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency)
359 for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
363 for key := range todo {
369 trashL := s3awsLister{
373 PageSize: v.IndexPageSize,
374 Stats: &v.bucket.stats,
376 for trash := trashL.First(); trash != nil; trash = trashL.Next() {
382 if err := trashL.Error(); err != nil {
383 v.logger.WithError(err).Error("EmptyTrash: lister failed")
385 v.logger.Infof("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
388 // fixRace(X) is called when "recent/X" exists but "X" doesn't
389 // exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
390 // there was a race between Put and Trash, fixRace recovers from the
391 // race by Untrashing the block.
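// fixRace returns true if it restored the block, false otherwise.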
392 func (v *S3AWSVolume) fixRace(loc string) bool {
393 trash, err := v.Head("trash/" + loc)
395 if !os.IsNotExist(v.translateError(err)) {
396 v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+loc)
401 recent, err := v.Head("recent/" + loc)
403 v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+loc)
407 recentTime := *recent.LastModified
408 trashTime := *trash.LastModified
409 ageWhenTrashed := trashTime.Sub(recentTime)
410 if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
411 // No evidence of a race: block hasn't been written
412 // since it became eligible for Trash. No fix needed.
416 v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
417 v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
418 err = v.safeCopy(loc, "trash/"+loc)
420 v.logger.WithError(err).Error("fixRace: copy failed")
426 func (v *S3AWSVolume) Head(loc string) (result *s3.HeadObjectOutput, err error) {
427 input := &s3.HeadObjectInput{
428 Bucket: aws.String(v.bucket.bucket),
429 Key: aws.String(loc),
432 req := v.bucket.svc.HeadObjectRequest(input)
433 res, err := req.Send(context.TODO())
435 v.bucket.stats.TickOps("head")
436 v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps)
437 v.bucket.stats.TickErr(err)
440 return nil, v.translateError(err)
442 result = res.HeadObjectOutput
// Get a block: copy the block data into buf, and return the number of bytes copied.
448 func (v *S3AWSVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
449 return getWithPipe(ctx, loc, buf, v)
452 func (v *S3AWSVolume) readWorker(ctx context.Context, loc string) (rdr io.ReadCloser, err error) {
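// Preallocate a buffer with 64 MiB (67108864 bytes) of capacity, enough for
// a full-size Keep block, so the downloader can write the whole block
// without growing the buffer.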
453 buf := make([]byte, 0, 67108864)
454 awsBuf := aws.NewWriteAtBuffer(buf)
456 downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
457 u.PartSize = 5 * 1024 * 1024
v.logger.Debugf("readWorker: PartSize %d, Concurrency %d", downloader.PartSize, downloader.Concurrency)
463 _, err = downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
464 Bucket: aws.String(v.bucket.bucket),
465 Key: aws.String(loc),
467 v.bucket.stats.TickOps("get")
468 v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
469 v.bucket.stats.TickErr(err)
471 return nil, v.translateError(err)
475 rdr = NewCountingReader(bytes.NewReader(buf), v.bucket.stats.TickInBytes)
479 // ReadBlock implements BlockReader.
480 func (v *S3AWSVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) error {
481 rdr, err := v.readWorker(ctx, loc)
484 _, err2 := io.Copy(w, rdr)
491 err = v.translateError(err)
492 if !os.IsNotExist(err) {
496 _, err = v.Head("recent/" + loc)
497 err = v.translateError(err)
499 // If we can't read recent/X, there's no point in
500 // trying fixRace. Give up.
508 rdr, err = v.readWorker(ctx, loc)
510 v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
511 err = v.translateError(err)
515 _, err = io.Copy(w, rdr)
520 func (b *s3AWSbucket) PutReader(path string, r io.Reader, length int64, contType string, contentMD5 string, contentSHA256 string) error {
// aws-sdk-go will only send Content-Length: 0 when the reader
// is nil, due to net/http.Request.ContentLength
// behavior. Otherwise the Content-Length header is
// omitted, which causes some S3 services
// (including AWS and Ceph RadosGW) to fail to create
// empty objects.
528 r = bytes.NewReader([]byte{})
530 r = NewCountingReader(r, b.stats.TickOutBytes)
532 uploader := s3manager.NewUploaderWithClient(b.svc)
533 _, err := uploader.Upload(&s3manager.UploadInput{
534 Bucket: aws.String(b.bucket),
535 Key: aws.String(path),
537 }, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
538 r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
541 b.stats.TickOps("put")
542 b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
547 // Put writes a block.
548 func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
549 return putWithPipe(ctx, loc, block, v)
552 // WriteBlock implements BlockWriter.
553 func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
554 if v.volume.ReadOnly {
555 return MethodDisabledError
558 r := NewCountingReader(rdr, v.bucket.stats.TickOutBytes)
559 uploadInput := s3manager.UploadInput{
560 Bucket: aws.String(v.bucket.bucket),
561 Key: aws.String(loc),
566 var contentMD5 string
567 md5, err := hex.DecodeString(loc)
571 contentMD5 = base64.StdEncoding.EncodeToString(md5)
// Skip ContentMD5 for the empty block; "1B2M2Y8AsgTpgAmY7PhCfg==" is the
// base64 encoding of the empty block's MD5 (d41d8cd98f00b204e9800998ecf8427e).
if contentMD5 != "1B2M2Y8AsgTpgAmY7PhCfg==" {
uploadInput.ContentMD5 = &contentMD5
577 // Some experimentation indicated that using concurrency 5 yields the best
578 // throughput, better than higher concurrency (10 or 13) by ~5%.
579 // Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
// is detrimental to throughput (roughly 15% lower).
581 uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
582 u.PartSize = 5 * 1024 * 1024
586 // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
587 // the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the
588 // block, so there is no extra memory use to be concerned about. See
589 // makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
590 // calculating the Sha-256 because we don't need it; we already use md5sum
591 // hashes that match the name of the block.
592 _, err = uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
593 r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
596 v.bucket.stats.TickOps("put")
597 v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
598 v.bucket.stats.TickErr(err)
603 empty := bytes.NewReader([]byte{})
604 _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
605 Bucket: aws.String(v.bucket.bucket),
606 Key: aws.String("recent/" + loc),
608 }, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
609 r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
611 v.bucket.stats.TickOps("put")
612 v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
613 v.bucket.stats.TickErr(err)
618 type s3awsLister struct {
619 Logger logrus.FieldLogger
620 Bucket *s3AWSbucket //*s3.Bucket
623 Stats *s3awsbucketStats
624 ContinuationToken string
629 // First fetches the first page and returns the first item. It returns
630 // nil if the response is the empty set or an error occurs.
631 func (lister *s3awsLister) First() *s3.Object {
636 // Next returns the next item, fetching the next page if necessary. It
// returns nil if the last available item has already been fetched, or an error occurs.
639 func (lister *s3awsLister) Next() *s3.Object {
640 if len(lister.buf) == 0 && lister.ContinuationToken != "" {
// Error returns the most recent error encountered by First or Next.
647 func (lister *s3awsLister) Error() error {
651 func (lister *s3awsLister) getPage() {
652 lister.Stats.TickOps("list")
653 lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
655 var input *s3.ListObjectsV2Input
656 if lister.ContinuationToken == "" {
657 input = &s3.ListObjectsV2Input{
658 Bucket: aws.String(lister.Bucket.bucket),
659 MaxKeys: aws.Int64(int64(lister.PageSize)),
660 Prefix: aws.String(lister.Prefix),
663 input = &s3.ListObjectsV2Input{
664 Bucket: aws.String(lister.Bucket.bucket),
665 MaxKeys: aws.Int64(int64(lister.PageSize)),
666 Prefix: aws.String(lister.Prefix),
667 ContinuationToken: &lister.ContinuationToken,
671 req := lister.Bucket.svc.ListObjectsV2Request(input)
672 resp, err := req.Send(context.Background())
674 if aerr, ok := err.(awserr.Error); ok {
682 if *resp.IsTruncated {
683 lister.ContinuationToken = *resp.NextContinuationToken
685 lister.ContinuationToken = ""
687 lister.buf = make([]s3.Object, 0, len(resp.Contents))
688 for _, key := range resp.Contents {
689 if !strings.HasPrefix(*key.Key, lister.Prefix) {
690 lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key)
693 lister.buf = append(lister.buf, key)
697 func (lister *s3awsLister) pop() (k *s3.Object) {
698 if len(lister.buf) > 0 {
700 lister.buf = lister.buf[1:]
705 // IndexTo writes a complete list of locators with the given prefix
706 // for which Get() can retrieve data.
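// Each line has the form "<hash>+<size> <mtime>", where <mtime> is the
// block's last-modified time in nanoseconds since the Unix epoch.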
707 func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
708 // Use a merge sort to find matching sets of X and recent/X.
709 dataL := s3awsLister{
713 PageSize: v.IndexPageSize,
714 Stats: &v.bucket.stats,
716 recentL := s3awsLister{
719 Prefix: "recent/" + prefix,
720 PageSize: v.IndexPageSize,
721 Stats: &v.bucket.stats,
723 for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
724 if *data.Key >= "g" {
725 // Conveniently, "recent/*" and "trash/*" are
726 // lexically greater than all hex-encoded data
727 // hashes, so stopping here avoids iterating
728 // over all of them needlessly with dataL.
731 if !v.isKeepBlock(*data.Key) {
735 // stamp is the list entry we should use to report the
736 // last-modified time for this data block: it will be
737 // the recent/X entry if one exists, otherwise the
738 // entry for the data block itself.
741 // Advance to the corresponding recent/X marker, if any
742 for recent != nil && recentL.Error() == nil {
743 if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 {
744 recent = recentL.Next()
748 recent = recentL.Next()
751 // recent/X marker is missing: we'll
// use the timestamp on the data block itself.
757 if err := recentL.Error(); err != nil {
760 fmt.Fprintf(writer, "%s+%d %d\n", *data.Key, *data.Size, stamp.LastModified.UnixNano())
765 // Mtime returns the stored timestamp for the given locator.
766 func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
767 _, err := v.Head(loc)
769 return s3AWSZeroTime, v.translateError(err)
771 resp, err := v.Head("recent/" + loc)
772 err = v.translateError(err)
773 if os.IsNotExist(err) {
774 // The data object X exists, but recent/X is missing.
775 err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
777 v.logger.WithError(err).Errorf("error creating %q", "recent/"+loc)
778 return s3AWSZeroTime, v.translateError(err)
780 v.logger.Infof("created %q to migrate existing block to new storage scheme", "recent/"+loc)
781 resp, err = v.Head("recent/" + loc)
783 v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+loc)
784 return s3AWSZeroTime, v.translateError(err)
786 } else if err != nil {
787 // HEAD recent/X failed for some other reason.
788 return s3AWSZeroTime, err
790 return *resp.LastModified, err
793 // Status returns a *VolumeStatus representing the current in-use
794 // storage capacity and a fake available capacity that doesn't make
795 // the volume seem full or nearly-full.
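// (BytesFree is reported as a constant 1000 blocks' worth of space rather
// than a real measurement.)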
796 func (v *S3AWSVolume) Status() *VolumeStatus {
797 return &VolumeStatus{
799 BytesFree: BlockSize * 1000,
804 // InternalStats returns bucket I/O and API call counters.
805 func (v *S3AWSVolume) InternalStats() interface{} {
806 return &v.bucket.stats
809 // Touch sets the timestamp for the given locator to the current time.
810 func (v *S3AWSVolume) Touch(loc string) error {
811 if v.volume.ReadOnly {
812 return MethodDisabledError
814 _, err := v.Head(loc)
815 err = v.translateError(err)
816 if os.IsNotExist(err) && v.fixRace(loc) {
// The data object got trashed in a race, but fixRace rescued it.
819 } else if err != nil {
822 err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
823 return v.translateError(err)
826 // checkRaceWindow returns a non-nil error if trash/loc is, or might
827 // be, in the race window (i.e., it's not safe to trash loc).
828 func (v *S3AWSVolume) checkRaceWindow(loc string) error {
829 resp, err := v.Head("trash/" + loc)
830 err = v.translateError(err)
831 if os.IsNotExist(err) {
// OK, trash/X doesn't exist, so we're not in the race window.
835 } else if err != nil {
836 // Error looking up trash/X. We don't know whether
837 // we're in the race window
840 t := resp.LastModified
841 safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
843 // We can't count on "touch trash/X" to prolong
844 // trash/X's lifetime. The new timestamp might not
845 // become visible until now+raceWindow, and EmptyTrash
846 // is allowed to delete trash/X before then.
847 return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)
849 // trash/X exists, but it won't be eligible for deletion until
850 // after now+raceWindow, so it's safe to overwrite it.
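// Worked example (hypothetical numbers): if trash/X was last modified 10
// minutes ago, BlobTrashLifetime is 24h, and RaceWindow is 1h, then
// safeWindow = (t+24h) - (now+1h) ≈ 22h50m > 0, so overwriting is safe.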
854 func (b *s3AWSbucket) Del(path string) error {
855 input := &s3.DeleteObjectInput{
856 Bucket: aws.String(b.bucket),
857 Key: aws.String(path),
859 req := b.svc.DeleteObjectRequest(input)
860 _, err := req.Send(context.Background())
862 b.stats.TickOps("delete")
863 b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
// Trash a Keep block: either remove it immediately (when BlobTrashLifetime
// is zero) or move it to trash/X so EmptyTrash can delete it later.
869 func (v *S3AWSVolume) Trash(loc string) error {
870 if v.volume.ReadOnly {
871 return MethodDisabledError
873 if t, err := v.Mtime(loc); err != nil {
875 } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
878 if v.cluster.Collections.BlobTrashLifetime == 0 {
880 return ErrS3TrashDisabled
882 return v.translateError(v.bucket.Del(loc))
884 err := v.checkRaceWindow(loc)
888 err = v.safeCopy("trash/"+loc, loc)
892 return v.translateError(v.bucket.Del(loc))
// Untrash moves a block from the trash back into the store.
896 func (v *S3AWSVolume) Untrash(loc string) error {
897 err := v.safeCopy(loc, "trash/"+loc)
901 err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
902 return v.translateError(err)
905 type s3awsbucketStats struct {
915 func (s *s3awsbucketStats) TickErr(err error) {
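// Label the error counter with the error's Go type; for AWS request
// failures, append the HTTP status code and AWS error code so distinct
// failure modes are counted separately.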
919 errType := fmt.Sprintf("%T", err)
920 if aerr, ok := err.(awserr.Error); ok {
921 if reqErr, ok := err.(awserr.RequestFailure); ok {
922 // A service error occurred
923 errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code())
925 errType = errType + fmt.Sprintf(" 000 %s", aerr.Code())
928 s.statsTicker.TickErr(err, errType)