1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
27 "git.arvados.org/arvados.git/sdk/go/arvados"
28 "github.com/AdRoll/goamz/aws"
29 "github.com/AdRoll/goamz/s3"
30 "github.com/prometheus/client_golang/prometheus"
31 "github.com/sirupsen/logrus"
35 driver["S3"] = chooseS3VolumeDriver
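// newS3Volume builds an S3Volume from the volume's DriverParameters,
// wiring in the cluster config, logger, and metrics collectors.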
38 func newS3Volume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
39 v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
40 err := json.Unmarshal(volume.DriverParameters, v)
44 v.logger = logger.WithField("Volume", v.String())
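// check validates DriverParameters, fills in defaults (page size,
// timeouts), resolves the region/endpoint, registers the Prometheus
// counters, and bootstraps IAM credentials if needed.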
48 func (v *S3Volume) check() error {
50 return errors.New("DriverParameters: Bucket must be provided")
52 if v.IndexPageSize == 0 {
53 v.IndexPageSize = 1000
56 return errors.New("DriverParameters: RaceWindow must not be negative")
60 r, ok := aws.Regions[v.Region]
62 return fmt.Errorf("unrecognized region %+q; try specifying endpoint instead", v.Region)
66 v.region = aws.Region{
68 S3Endpoint: v.Endpoint,
69 S3LocationConstraint: v.LocationConstraint,
73 // Zero timeouts mean "wait forever", which is a bad
74 // default. Default to long timeouts instead.
75 if v.ConnectTimeout == 0 {
76 v.ConnectTimeout = s3DefaultConnectTimeout
78 if v.ReadTimeout == 0 {
79 v.ReadTimeout = s3DefaultReadTimeout
88 // Set up prometheus metrics
89 lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
90 v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
92 err := v.bootstrapIAMCredentials()
94 return fmt.Errorf("error getting IAM credentials: %s", err)
101 s3DefaultReadTimeout = arvados.Duration(10 * time.Minute)
102 s3DefaultConnectTimeout = arvados.Duration(time.Minute)
106 // ErrS3TrashDisabled is returned by Trash if that operation
107 // is impossible with the current config.
108 ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
116 maxClockSkew = 600 * time.Second
117 nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
120 func s3regions() (okList []string) {
121 for r := range aws.Regions {
122 okList = append(okList, r)
127 // S3Volume implements Volume using an S3 bucket.
128 type S3Volume struct {
129 arvados.S3VolumeDriverParameters
130 AuthToken string // populated automatically when IAMRole is used
131 AuthExpiration time.Time // populated automatically when IAMRole is used
133 cluster *arvados.Cluster
134 volume arvados.Volume
135 logger logrus.FieldLogger
136 metrics *volumeMetricsVecs
142 // GetDeviceID returns a globally unique ID for the storage bucket.
143 func (v *S3Volume) GetDeviceID() string {
144 return "s3://" + v.Endpoint + "/" + v.Bucket
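// For example, with DriverParameters Endpoint "s3.us-east-1.amazonaws.com"
// and Bucket "keep-blocks" (hypothetical values), this returns
// "s3://s3.us-east-1.amazonaws.com/keep-blocks".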
147 func (v *S3Volume) bootstrapIAMCredentials() error {
148 if v.AccessKeyID != "" || v.SecretAccessKey != "" {
150 return errors.New("invalid DriverParameters: AccessKeyID and SecretAccessKey must be blank if IAMRole is specified")
154 ttl, err := v.updateIAMCredentials()
161 ttl, err = v.updateIAMCredentials()
163 v.logger.WithError(err).Warnf("failed to update credentials for IAM role %q", v.IAMRole)
165 } else if ttl < time.Second {
166 v.logger.WithField("TTL", ttl).Warnf("received stale credentials for IAM role %q", v.IAMRole)
174 func (v *S3Volume) newS3Client() *s3.S3 {
175 auth := aws.NewAuth(v.AccessKeyID, v.SecretAccessKey, v.AuthToken, v.AuthExpiration)
176 client := s3.New(*auth, v.region)
178 client.Signature = aws.V4Signature
180 client.ConnectTimeout = time.Duration(v.ConnectTimeout)
181 client.ReadTimeout = time.Duration(v.ReadTimeout)
185 // returned by AWS metadata endpoint .../security-credentials/${rolename}
186 type iamCredentials struct {
188 LastUpdated time.Time
191 SecretAccessKey string
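// A security-credentials response is a JSON document along these lines
// (abridged; values invented for illustration):
//
//	{
//	  "LastUpdated": "2021-01-01T00:00:00Z",
//	  "AccessKeyId": "ASIAEXAMPLE",
//	  "SecretAccessKey": "...",
//	  "Token": "...",
//	  "Expiration": "2021-01-01T06:00:00Z"
//	}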
196 // Returns TTL of updated credentials, i.e., time to sleep until next update.

198 func (v *S3Volume) updateIAMCredentials() (time.Duration, error) {
199 ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
202 metadataBaseURL := "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
205 if strings.Contains(v.IAMRole, "://") {
206 // Configuration provides complete URL (used by tests)
208 } else if v.IAMRole != "" {
209 // Configuration provides IAM role name and we use the
210 // AWS metadata endpoint
211 url = metadataBaseURL + v.IAMRole
213 url = metadataBaseURL
214 v.logger.WithField("URL", url).Debug("looking up IAM role name")
215 req, err := http.NewRequest("GET", url, nil)
217 return 0, fmt.Errorf("error setting up request %s: %s", url, err)
219 resp, err := http.DefaultClient.Do(req.WithContext(ctx))
221 return 0, fmt.Errorf("error getting %s: %s", url, err)
223 defer resp.Body.Close()
224 if resp.StatusCode == http.StatusNotFound {
225 return 0, fmt.Errorf("this instance does not have an IAM role assigned -- either assign a role, or configure AccessKeyID and SecretAccessKey explicitly in DriverParameters (error getting %s: HTTP status %s)", url, resp.Status)
226 } else if resp.StatusCode != http.StatusOK {
227 return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
229 body := bufio.NewReader(resp.Body)
231 _, err = fmt.Fscanf(body, "%s\n", &role)
233 return 0, fmt.Errorf("error reading response from %s: %s", url, err)
235 if n, _ := body.Read(make([]byte, 64)); n > 0 {
236 v.logger.Warnf("ignoring additional data returned by metadata endpoint %s after the single role name that we expected", url)
238 v.logger.WithField("Role", role).Debug("looked up IAM role name")
242 v.logger.WithField("URL", url).Debug("getting credentials")
243 req, err := http.NewRequest("GET", url, nil)
245 return 0, fmt.Errorf("error setting up request %s: %s", url, err)
247 resp, err := http.DefaultClient.Do(req.WithContext(ctx))
249 return 0, fmt.Errorf("error getting %s: %s", url, err)
251 defer resp.Body.Close()
252 if resp.StatusCode != http.StatusOK {
253 return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
255 var cred iamCredentials
256 err = json.NewDecoder(resp.Body).Decode(&cred)
258 return 0, fmt.Errorf("error decoding credentials from %s: %s", url, err)
260 v.AccessKeyID, v.SecretAccessKey, v.AuthToken, v.AuthExpiration = cred.AccessKeyID, cred.SecretAccessKey, cred.Token, cred.Expiration
261 v.bucket.SetBucket(&s3.Bucket{
265 // TTL is time from now to expiration, minus 5m. "We make new
266 // credentials available at least five minutes before the
267 // expiration of the old credentials." --
268 // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
269 // (If that's not true, the returned ttl might be zero or
270 // negative, which the caller can handle.)
271 ttl := cred.Expiration.Sub(time.Now()) - 5*time.Minute
272 v.logger.WithFields(logrus.Fields{
273 "AccessKeyID": cred.AccessKeyID,
274 "LastUpdated": cred.LastUpdated,
275 "Expiration": cred.Expiration,
276 "TTL": arvados.Duration(ttl),
277 }).Debug("updated credentials")
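// Worked example (hypothetical numbers): credentials expiring one hour
// from now yield ttl = 1h - 5m = 55m, so the refresh loop in
// bootstrapIAMCredentials waits roughly 55 minutes before fetching new
// credentials.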
281 func (v *S3Volume) getReaderWithContext(ctx context.Context, key string) (rdr io.ReadCloser, err error) {
282 ready := make(chan bool)
284 rdr, err = v.getReader(key)
291 v.logger.Debugf("s3: abandoning getReader(%s): %s", key, ctx.Err())
298 return nil, ctx.Err()
302 // getReader wraps (Bucket)GetReader.
304 // In situations where (Bucket)GetReader would fail because the block
305 // disappeared in a Trash race, getReader calls fixRace to recover the
306 // data, and tries again.
307 func (v *S3Volume) getReader(key string) (rdr io.ReadCloser, err error) {
308 rdr, err = v.bucket.GetReader(key)
309 err = v.translateError(err)
310 if err == nil || !os.IsNotExist(err) {
314 _, err = v.bucket.Head("recent/"+key, nil)
315 err = v.translateError(err)
317 // If we can't read recent/X, there's no point in
318 // trying fixRace. Give up.
326 rdr, err = v.bucket.GetReader(key)
328 v.logger.Warnf("reading %s after successful fixRace: %s", key, err)
329 err = v.translateError(err)
334 // Get a block: copy the block data into buf, and return the number of bytes copied.
336 func (v *S3Volume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
338 rdr, err := v.getReaderWithContext(ctx, key)
344 ready := make(chan bool)
349 n, err = io.ReadFull(rdr, buf)
352 case nil, io.EOF, io.ErrUnexpectedEOF:
355 err = v.translateError(err)
360 v.logger.Debugf("s3: interrupting ReadFull() with Close() because %s", ctx.Err())
362 // Must wait for ReadFull to return, to ensure it
363 // doesn't write to buf after we return.
364 v.logger.Debug("s3: waiting for ReadFull() to fail")
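// The cancellation pattern used in Get is general: run the blocking
// read in a goroutine, and on ctx.Done() call Close() on the reader to
// force ReadFull to return, then wait for the goroutine so the shared
// buffer is never written after we return. The following standalone
// sketch (a hypothetical helper, not used elsewhere in this file)
// shows the same idea in isolation.
func readFullWithCancel(ctx context.Context, rdr io.ReadCloser, buf []byte) (int, error) {
	done := make(chan struct{})
	var n int
	var err error
	go func() {
		defer close(done)
		n, err = io.ReadFull(rdr, buf)
	}()
	select {
	case <-done:
		return n, err
	case <-ctx.Done():
		// Closing the reader unblocks io.ReadFull.
		rdr.Close()
		// Wait, so the goroutine cannot write to buf after we return.
		<-done
		return 0, ctx.Err()
	}
}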
372 // Compare the given data with the stored data.
373 func (v *S3Volume) Compare(ctx context.Context, loc string, expect []byte) error {
375 errChan := make(chan error, 1)
377 _, err := v.bucket.Head("recent/"+key, nil)
384 case err = <-errChan:
387 // Checking for "loc" itself here would interfere with
388 // future GET requests.
390 // On AWS, if X doesn't exist, a HEAD or GET request
391 // for X causes X's non-existence to be cached. Thus,
392 // if we test for X, then create X and return a
393 // signature to our client, the client might still get
394 // 404 from all keepstores when trying to read it.
396 // To avoid this, we avoid doing HEAD X or GET X until
397 // we know X has been written.
399 // Note that X might exist even though recent/X
400 // doesn't: for example, the response to HEAD recent/X
401 // might itself come from a stale cache. In such
402 // cases, we will return a false negative and
403 // PutHandler might needlessly create another replica
404 // on a different volume. That's not ideal, but it's
405 // better than passing the eventually-consistent
406 // problem on to our clients.
407 return v.translateError(err)
409 rdr, err := v.getReaderWithContext(ctx, key)
414 return v.translateError(compareReaderWithBuf(ctx, rdr, expect, loc[:32]))
417 // Put writes a block.
418 func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
419 if v.volume.ReadOnly {
420 return MethodDisabledError
425 md5, err := hex.DecodeString(loc)
429 opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5)
430 // In AWS regions that use V4 signatures, we need to
431 // provide ContentSHA256 up front. Otherwise, the S3
432 // library reads the request body (from our buffer)
433 // into another new buffer in order to compute the
434 // SHA256 before sending the request -- which would
435 // mean consuming 128 MiB of memory for the duration
436 // of a 64 MiB write.
437 opts.ContentSHA256 = fmt.Sprintf("%x", sha256.Sum256(block))
442 // Send the block data through a pipe, so that (if we need to)
443 // we can close the pipe early and abandon our PutReader()
444 // goroutine, without worrying about PutReader() accessing our
445 // block buffer after we release it.
446 bufr, bufw := io.Pipe()
448 io.Copy(bufw, bytes.NewReader(block))
453 ready := make(chan bool)
456 if ctx.Err() != nil {
457 v.logger.Debugf("abandoned PutReader goroutine finished with err: %s", err)
461 err = v.bucket.PutReader(key, bufr, int64(size), "application/octet-stream", s3ACL, opts)
465 err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
469 v.logger.Debugf("taking PutReader's input away: %s", ctx.Err())
470 // Our pipe might be stuck in Write(), waiting for
471 // PutReader() to read. If so, un-stick it. This means
472 // PutReader will get corrupt data, but that's OK: the
473 // size and MD5 won't match, so the write will fail.
474 go io.Copy(ioutil.Discard, bufr)
475 // CloseWithError() will return once pending I/O is done.
476 bufw.CloseWithError(ctx.Err())
477 v.logger.Debugf("abandoning PutReader goroutine")
480 // Unblock pipe in case PutReader did not consume it.
481 io.Copy(ioutil.Discard, bufr)
482 return v.translateError(err)
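// The pipe above decouples the caller's block buffer from the
// PutReader goroutine: the goroutine reads only from the pipe, so
// draining the read end and closing the write end lets us abandon a
// stuck upload without racing on the buffer. A standalone sketch of
// the pattern (pipedUpload and its upload argument are hypothetical,
// not part of this package):
func pipedUpload(ctx context.Context, block []byte, upload func(io.Reader) error) error {
	bufr, bufw := io.Pipe()
	go func() {
		// Feed the block into the pipe; the uploader never sees the
		// caller's buffer directly.
		io.Copy(bufw, bytes.NewReader(block))
		bufw.Close()
	}()
	done := make(chan error, 1)
	go func() {
		done <- upload(bufr)
	}()
	select {
	case err := <-done:
		// Drain anything the uploader didn't consume so the feeding
		// goroutine can finish.
		go io.Copy(ioutil.Discard, bufr)
		return err
	case <-ctx.Done():
		// Un-stick any pending pipe Write, then close with an error so
		// the abandoned upload fails its size/MD5 checks.
		go io.Copy(ioutil.Discard, bufr)
		bufw.CloseWithError(ctx.Err())
		return ctx.Err()
	}
}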
486 // Touch sets the timestamp for the given locator to the current time.
487 func (v *S3Volume) Touch(loc string) error {
488 if v.volume.ReadOnly {
489 return MethodDisabledError
492 _, err := v.bucket.Head(key, nil)
493 err = v.translateError(err)
494 if os.IsNotExist(err) && v.fixRace(key) {
495 // The data object got trashed in a race, but fixRace rescued it.
497 } else if err != nil {
500 err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
501 return v.translateError(err)
504 // Mtime returns the stored timestamp for the given locator.
505 func (v *S3Volume) Mtime(loc string) (time.Time, error) {
507 _, err := v.bucket.Head(key, nil)
509 return zeroTime, v.translateError(err)
511 resp, err := v.bucket.Head("recent/"+key, nil)
512 err = v.translateError(err)
513 if os.IsNotExist(err) {
514 // The data object X exists, but recent/X is missing.
515 err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
517 v.logger.WithError(err).Errorf("error creating %q", "recent/"+key)
518 return zeroTime, v.translateError(err)
520 v.logger.Infof("created %q to migrate existing block to new storage scheme", "recent/"+key)
521 resp, err = v.bucket.Head("recent/"+key, nil)
523 v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key)
524 return zeroTime, v.translateError(err)
526 } else if err != nil {
527 // HEAD recent/X failed for some other reason.
530 return v.lastModified(resp)
533 // IndexTo writes a complete list of locators with the given prefix
534 // for which Get() can retrieve data.
535 func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
536 // Use a merge sort to find matching sets of X and recent/X.
539 Bucket: v.bucket.Bucket(),
540 Prefix: v.key(prefix),
541 PageSize: v.IndexPageSize,
542 Stats: &v.bucket.stats,
546 Bucket: v.bucket.Bucket(),
547 Prefix: "recent/" + v.key(prefix),
548 PageSize: v.IndexPageSize,
549 Stats: &v.bucket.stats,
551 for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
553 // Conveniently, "recent/*" and "trash/*" are
554 // lexically greater than all hex-encoded data
555 // hashes, so stopping here avoids iterating
556 // over all of them needlessly with dataL.
559 loc, isBlk := v.isKeepBlock(data.Key)
564 // stamp is the list entry we should use to report the
565 // last-modified time for this data block: it will be
566 // the recent/X entry if one exists, otherwise the
567 // entry for the data block itself.
570 // Advance to the corresponding recent/X marker, if any
571 for recent != nil && recentL.Error() == nil {
572 if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
573 recent = recentL.Next()
577 recent = recentL.Next()
580 // recent/X marker is missing: we'll
581 // use the timestamp on the data object.
586 if err := recentL.Error(); err != nil {
589 t, err := time.Parse(time.RFC3339, stamp.LastModified)
593 // We truncate sub-second precision here. Otherwise
594 // timestamps will never match the RFC1123-formatted
595 // Last-Modified values parsed by Mtime().
596 fmt.Fprintf(writer, "%s+%d %d\n", loc, data.Size, t.Unix()*1000000000)
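// Each line written above looks like (hypothetical values)
// "acbd18db4cc2f85cedef654fccc4a4d8+3 1609459200000000000":
// locator+size, a space, and the modification time as Unix
// nanoseconds, truncated to whole seconds.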
601 // Trash a Keep block.
602 func (v *S3Volume) Trash(loc string) error {
603 if v.volume.ReadOnly {
604 return MethodDisabledError
606 if t, err := v.Mtime(loc); err != nil {
608 } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
612 if v.cluster.Collections.BlobTrashLifetime == 0 {
614 return ErrS3TrashDisabled
616 return v.translateError(v.bucket.Del(key))
618 err := v.checkRaceWindow(key)
622 err = v.safeCopy("trash/"+key, key)
626 return v.translateError(v.bucket.Del(key))
629 // checkRaceWindow returns a non-nil error if trash/key is, or might
630 // be, in the race window (i.e., it's not safe to trash key).
631 func (v *S3Volume) checkRaceWindow(key string) error {
632 resp, err := v.bucket.Head("trash/"+key, nil)
633 err = v.translateError(err)
634 if os.IsNotExist(err) {
635 // OK, trash/X doesn't exist, so we're not in the race window.
638 } else if err != nil {
639 // Error looking up trash/X. We don't know whether
640 // we're in the race window.
643 t, err := v.lastModified(resp)
645 // Can't parse timestamp
648 safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
650 // We can't count on "touch trash/X" to prolong
651 // trash/X's lifetime. The new timestamp might not
652 // become visible until now+raceWindow, and EmptyTrash
653 // is allowed to delete trash/X before then.
654 return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow)
656 // trash/X exists, but it won't be eligible for deletion until
657 // after now+raceWindow, so it's safe to overwrite it.
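// Worked example (hypothetical settings): with BlobTrashLifetime = 72h
// and RaceWindow = 24h, a trash/X last modified 50 hours ago gives
// safeWindow = (t + 72h) - (now + 24h) = -2h, so checkRaceWindow
// refuses to trash X again; if trash/X were only 10 hours old,
// safeWindow would be 38h and trashing could proceed.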
661 // safeCopy calls PutCopy, and checks the response to make sure the
662 // copy succeeded and updated the timestamp on the destination object
663 // (PutCopy returns 200 OK if the request was received, even if the copy failed).
665 func (v *S3Volume) safeCopy(dst, src string) error {
666 resp, err := v.bucket.Bucket().PutCopy(dst, s3ACL, s3.CopyOptions{
667 ContentType: "application/octet-stream",
668 MetadataDirective: "REPLACE",
669 }, v.bucket.Bucket().Name+"/"+src)
670 err = v.translateError(err)
671 if os.IsNotExist(err) {
673 } else if err != nil {
674 return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.Bucket().Name+"/"+src, err)
676 if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
677 return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
678 } else if time.Now().Sub(t) > maxClockSkew {
679 return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.LastModified, t)
684 // Get the LastModified header from resp, and parse it as RFC1123 or
685 // -- if it isn't valid RFC1123 -- as Amazon's variant of RFC1123.
686 func (v *S3Volume) lastModified(resp *http.Response) (t time.Time, err error) {
687 s := resp.Header.Get("Last-Modified")
688 t, err = time.Parse(time.RFC1123, s)
689 if err != nil && s != "" {
690 // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
691 // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
692 // as required by HTTP spec. If it's not a valid HTTP
693 // header value, it's probably AWS (or s3test) giving
694 // us a nearly-RFC1123 timestamp.
695 t, err = time.Parse(nearlyRFC1123, s)
700 // Untrash moves a block from trash back into the store.
701 func (v *S3Volume) Untrash(loc string) error {
703 err := v.safeCopy(key, "trash/"+key)
707 err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
708 return v.translateError(err)
711 // Status returns a *VolumeStatus representing the current in-use
712 // storage capacity and a fake available capacity that doesn't make
713 // the volume seem full or nearly-full.
714 func (v *S3Volume) Status() *VolumeStatus {
715 return &VolumeStatus{
717 BytesFree: BlockSize * 1000,
722 // InternalStats returns bucket I/O and API call counters.
723 func (v *S3Volume) InternalStats() interface{} {
724 return &v.bucket.stats
727 // String implements fmt.Stringer.
728 func (v *S3Volume) String() string {
729 return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
732 var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
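// isKeepBlock returns the bare locator hash for the given object key
// (stripping any PrefixLength directory prefix) and reports whether
// the key looks like a Keep block.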
734 func (v *S3Volume) isKeepBlock(s string) (string, bool) {
735 if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
736 s = s[v.PrefixLength+1:]
738 return s, s3KeepBlockRegexp.MatchString(s)
741 // Return the key used for a given loc. If PrefixLength==0 then
742 // key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
743 // "abc/abcdef0123", etc.
744 func (v *S3Volume) key(loc string) string {
745 if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
746 return loc[:v.PrefixLength] + "/" + loc
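// For example (hypothetical locator), with PrefixLength == 3 the block
// acbd18db4cc2f85cedef654fccc4a4d8 is stored under key
// "acb/acbd18db4cc2f85cedef654fccc4a4d8", and isKeepBlock maps that
// key back to the bare hash.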
752 // fixRace(X) is called when "recent/X" exists but "X" doesn't
753 // exist. If the timestamps on "recent/X" and "trash/X" indicate there
754 // was a race between Put and Trash, fixRace recovers from the race by
755 // Untrashing the block.
756 func (v *S3Volume) fixRace(key string) bool {
757 trash, err := v.bucket.Head("trash/"+key, nil)
759 if !os.IsNotExist(v.translateError(err)) {
760 v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key)
764 trashTime, err := v.lastModified(trash)
766 v.logger.WithError(err).Errorf("fixRace: error parsing time %q", trash.Header.Get("Last-Modified"))
770 recent, err := v.bucket.Head("recent/"+key, nil)
772 v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key)
775 recentTime, err := v.lastModified(recent)
777 v.logger.WithError(err).Errorf("fixRace: error parsing time %q", recent.Header.Get("Last-Modified"))
781 ageWhenTrashed := trashTime.Sub(recentTime)
782 if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
783 // No evidence of a race: block hasn't been written
784 // since it became eligible for Trash. No fix needed.
788 v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
789 v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key)
790 err = v.safeCopy(key, "trash/"+key)
792 v.logger.WithError(err).Error("fixRace: copy failed")
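// Example of the race this repairs (hypothetical timing): a client
// Touches block X at time T, updating recent/X, while a concurrent
// Trash acting on stale state moves X to trash/X at T+1m. Then
// ageWhenTrashed is 1m, well under BlobSigningTTL, so fixRace copies
// trash/X back to X and the block survives.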
798 func (v *S3Volume) translateError(err error) error {
799 switch err := err.(type) {
801 if (err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey") ||
802 strings.Contains(err.Error(), "Not Found") {
803 return os.ErrNotExist
805 // Other 404 errors like NoSuchVersion and
806 // NoSuchBucket are different problems which should
807 // get called out downstream, so we don't convert them
808 // to os.ErrNotExist.
813 // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
814 // and deletes them from the volume.
815 func (v *S3Volume) EmptyTrash() {
816 if v.cluster.Collections.BlobDeleteConcurrency < 1 {
820 var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
822 // Define "ready to delete" as "...when EmptyTrash started".
825 emptyOneKey := func(trash *s3.Key) {
827 loc, isBlk := v.isKeepBlock(key)
831 atomic.AddInt64(&bytesInTrash, trash.Size)
832 atomic.AddInt64(&blocksInTrash, 1)
834 trashT, err := time.Parse(time.RFC3339, trash.LastModified)
836 v.logger.Warnf("EmptyTrash: %q: parse %q: %s", trash.Key, trash.LastModified, err)
839 recent, err := v.bucket.Head("recent/"+key, nil)
840 if err != nil && os.IsNotExist(v.translateError(err)) {
841 v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", trash.Key, "recent/"+loc, err)
844 v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
847 } else if err != nil {
848 v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key)
851 recentT, err := v.lastModified(recent)
853 v.logger.WithError(err).Warnf("EmptyTrash: %q: error parsing %q", "recent/"+key, recent.Header.Get("Last-Modified"))
856 if trashT.Sub(recentT) < v.cluster.Collections.BlobSigningTTL.Duration() {
857 if age := startT.Sub(recentT); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
858 // recent/loc is too old to protect
859 // loc from being Trashed again during
860 // the raceWindow that starts if we
861 // delete trash/X now.
863 // Note this means (TrashSweepInterval
864 // < BlobSigningTTL - raceWindow) is
865 // necessary to avoid starvation.
866 v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
871 _, err := v.bucket.Head(key, nil)
872 if os.IsNotExist(err) {
873 v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
876 } else if err != nil {
877 v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
881 if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
884 err = v.bucket.Del(trash.Key)
886 v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", trash.Key)
889 atomic.AddInt64(&bytesDeleted, trash.Size)
890 atomic.AddInt64(&blocksDeleted, 1)
892 _, err = v.bucket.Head(key, nil)
894 v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", key, key)
897 if !os.IsNotExist(v.translateError(err)) {
898 v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", key)
901 err = v.bucket.Del("recent/" + key)
903 v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+key)
907 var wg sync.WaitGroup
908 todo := make(chan *s3.Key, v.cluster.Collections.BlobDeleteConcurrency)
909 for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
913 for key := range todo {
921 Bucket: v.bucket.Bucket(),
923 PageSize: v.IndexPageSize,
924 Stats: &v.bucket.stats,
926 for trash := trashL.First(); trash != nil; trash = trashL.Next() {
932 if err := trashL.Error(); err != nil {
933 v.logger.WithError(err).Error("EmptyTrash: lister failed")
935 v.logger.Infof("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
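// EmptyTrash bounds concurrency with a small worker pool: a channel of
// work items drained by BlobDeleteConcurrency goroutines, with a
// sync.WaitGroup to wait for completion. A standalone sketch of the
// same pattern (forEachConcurrently is a hypothetical helper, not part
// of this package):
func forEachConcurrently(keys []*s3.Key, concurrency int, fn func(*s3.Key)) {
	todo := make(chan *s3.Key, concurrency)
	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range todo {
				fn(key)
			}
		}()
	}
	for _, key := range keys {
		todo <- key
	}
	close(todo)
	wg.Wait()
}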
938 type s3Lister struct {
939 Logger logrus.FieldLogger
949 // First fetches the first page and returns the first item. It returns
950 // nil if the response is the empty set or an error occurs.
951 func (lister *s3Lister) First() *s3.Key {
956 // Next returns the next item, fetching the next page if necessary. It
957 // returns nil if the last available item has already been fetched, or an error occurs.
959 func (lister *s3Lister) Next() *s3.Key {
960 if len(lister.buf) == 0 && lister.nextMarker != "" {
966 // Return the most recent error encountered by First or Next.
967 func (lister *s3Lister) Error() error {
971 func (lister *s3Lister) getPage() {
972 lister.Stats.TickOps("list")
973 lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
974 resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize)
975 lister.nextMarker = ""
980 if resp.IsTruncated {
981 lister.nextMarker = resp.NextMarker
983 lister.buf = make([]s3.Key, 0, len(resp.Contents))
984 for _, key := range resp.Contents {
985 if !strings.HasPrefix(key.Key, lister.Prefix) {
986 lister.Logger.Warnf("s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
989 lister.buf = append(lister.buf, key)
993 func (lister *s3Lister) pop() (k *s3.Key) {
994 if len(lister.buf) > 0 {
996 lister.buf = lister.buf[1:]
1001 // s3bucket wraps s3.Bucket and counts I/O and API usage stats. The
1002 // wrapped bucket can be replaced atomically with SetBucket in order
1003 // to update credentials.
1004 type s3bucket struct {
1010 func (b *s3bucket) Bucket() *s3.Bucket {
1016 func (b *s3bucket) SetBucket(bucket *s3.Bucket) {
1022 func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
1023 rdr, err := b.Bucket().GetReader(path)
1024 b.stats.TickOps("get")
1025 b.stats.Tick(&b.stats.Ops, &b.stats.GetOps)
1026 b.stats.TickErr(err)
1027 return NewCountingReader(rdr, b.stats.TickInBytes), err
1030 func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
1031 resp, err := b.Bucket().Head(path, headers)
1032 b.stats.TickOps("head")
1033 b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps)
1034 b.stats.TickErr(err)
1038 func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
1040 // goamz will only send Content-Length: 0 when the reader
1041 // is nil, due to net/http.Request.ContentLength
1042 // behavior. Otherwise, the Content-Length header is
1043 // omitted, which causes some S3 services
1044 // (including AWS and Ceph RadosGW) to fail to create empty objects.
1048 r = NewCountingReader(r, b.stats.TickOutBytes)
1050 err := b.Bucket().PutReader(path, r, length, contType, perm, options)
1051 b.stats.TickOps("put")
1052 b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
1053 b.stats.TickErr(err)
1057 func (b *s3bucket) Del(path string) error {
1058 err := b.Bucket().Del(path)
1059 b.stats.TickOps("delete")
1060 b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
1061 b.stats.TickErr(err)
1065 type s3bucketStats struct {
1075 func (s *s3bucketStats) TickErr(err error) {
1079 errType := fmt.Sprintf("%T", err)
1080 if err, ok := err.(*s3.Error); ok {
1081 errType = errType + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
1083 s.statsTicker.TickErr(err, errType)