package main
import (
+ "bufio"
"bytes"
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
- "flag"
+ "encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
"sync/atomic"
"time"
- "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.arvados.org/arvados.git/sdk/go/arvados"
"github.com/AdRoll/goamz/aws"
"github.com/AdRoll/goamz/s3"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
)
+func init() {
+ driver["S3"] = newS3Volume
+}
+
+func newS3Volume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
+ v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
+ err := json.Unmarshal(volume.DriverParameters, &v)
+ if err != nil {
+ return nil, err
+ }
+ v.logger = logger.WithField("Volume", v.String())
+ return v, v.check()
+}
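+
+// Illustrative sketch (hypothetical values, not the real keepstore startup
+// path) of how the registry entry above gets used: the generic volume setup
+// code looks up the Driver name and hands the raw JSON parameters to the
+// constructor, which unmarshals them over the struct's defaults and then
+// validates them with check():
+//
+//	vol, err := driver["S3"](cluster, arvados.Volume{
+//		Driver:           "S3",
+//		DriverParameters: json.RawMessage(`{"Bucket":"example-bucket","Region":"us-east-1"}`),
+//	}, logrus.New(), metrics)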
+
+func (v *S3Volume) check() error {
+ if v.Bucket == "" {
+ return errors.New("DriverParameters: Bucket must be provided")
+ }
+ if v.IndexPageSize == 0 {
+ v.IndexPageSize = 1000
+ }
+ if v.RaceWindow < 0 {
+ return errors.New("DriverParameters: RaceWindow must not be negative")
+ }
+
+ var ok bool
+ v.region, ok = aws.Regions[v.Region]
+ if v.Endpoint == "" {
+ if !ok {
+ return fmt.Errorf("unrecognized region %+q; try specifying endpoint instead", v.Region)
+ }
+ } else if ok {
+ return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
+ "specify empty endpoint or use a different region name", v.Region, v.Endpoint)
+ } else {
+ v.region = aws.Region{
+ Name: v.Region,
+ S3Endpoint: v.Endpoint,
+ S3LocationConstraint: v.LocationConstraint,
+ }
+ }
+
+ // Zero timeouts mean "wait forever", which is a bad
+ // default. Default to long timeouts instead.
+ if v.ConnectTimeout == 0 {
+ v.ConnectTimeout = s3DefaultConnectTimeout
+ }
+ if v.ReadTimeout == 0 {
+ v.ReadTimeout = s3DefaultReadTimeout
+ }
+
+ v.bucket = &s3bucket{
+ bucket: &s3.Bucket{
+ S3: v.newS3Client(),
+ Name: v.Bucket,
+ },
+ }
+ // Set up prometheus metrics
+ lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
+ v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
+
+ err := v.bootstrapIAMCredentials()
+ if err != nil {
+ return fmt.Errorf("error getting IAM credentials: %s", err)
+ }
+
+ return nil
+}
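+
+// For illustration, the two DriverParameters shapes that pass the
+// region/endpoint checks above (bucket and endpoint values hypothetical):
+//
+//	{"Bucket": "b", "Region": "us-east-1"}
+//	    known AWS region name, default AWS endpoint
+//	{"Bucket": "b", "Region": "custom", "Endpoint": "https://s3.example.com"}
+//	    custom region name with an explicit endpoint
+//
+// A recognized AWS region name combined with a non-empty Endpoint is
+// rejected outright, since the two could silently point at different
+// services.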
+
const (
s3DefaultReadTimeout = arvados.Duration(10 * time.Minute)
s3DefaultConnectTimeout = arvados.Duration(time.Minute)
)

var (
// ErrS3TrashDisabled is returned by Trash if that operation
// is impossible with the current config.
- ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because -trash-lifetime=0 and -s3-unsafe-delete=false")
-
- s3AccessKeyFile string
- s3SecretKeyFile string
- s3RegionName string
- s3Endpoint string
- s3Replication int
- s3UnsafeDelete bool
- s3RaceWindow time.Duration
+ ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
s3ACL = s3.Private
nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
)
-type s3VolumeAdder struct {
- *Config
-}
-
-// String implements flag.Value
-func (s *s3VolumeAdder) String() string {
- return "-"
-}
-
-func (s *s3VolumeAdder) Set(bucketName string) error {
- if bucketName == "" {
- return fmt.Errorf("no container name given")
- }
- if s3AccessKeyFile == "" || s3SecretKeyFile == "" {
- return fmt.Errorf("-s3-access-key-file and -s3-secret-key-file arguments must given before -s3-bucket-volume")
- }
- if deprecated.flagSerializeIO {
- log.Print("Notice: -serialize is not supported by s3-bucket volumes.")
- }
- s.Config.Volumes = append(s.Config.Volumes, &S3Volume{
- Bucket: bucketName,
- AccessKeyFile: s3AccessKeyFile,
- SecretKeyFile: s3SecretKeyFile,
- Endpoint: s3Endpoint,
- Region: s3RegionName,
- RaceWindow: arvados.Duration(s3RaceWindow),
- S3Replication: s3Replication,
- UnsafeDelete: s3UnsafeDelete,
- ReadOnly: deprecated.flagReadonly,
- IndexPageSize: 1000,
- })
- return nil
-}
-
func s3regions() (okList []string) {
for r := range aws.Regions {
okList = append(okList, r)
	}
	return
}
-func init() {
- VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &S3Volume{} })
-
- flag.Var(&s3VolumeAdder{theConfig},
- "s3-bucket-volume",
- "Use the given bucket as a storage volume. Can be given multiple times.")
- flag.StringVar(
- &s3RegionName,
- "s3-region",
- "",
- fmt.Sprintf("AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are %+q.", s3regions()))
- flag.StringVar(
- &s3Endpoint,
- "s3-endpoint",
- "",
- "Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use \"https://storage.googleapis.com\".")
- flag.StringVar(
- &s3AccessKeyFile,
- "s3-access-key-file",
- "",
- "`File` containing the access key used for subsequent -s3-bucket-volume arguments.")
- flag.StringVar(
- &s3SecretKeyFile,
- "s3-secret-key-file",
- "",
- "`File` containing the secret key used for subsequent -s3-bucket-volume arguments.")
- flag.DurationVar(
- &s3RaceWindow,
- "s3-race-window",
- 24*time.Hour,
- "Maximum eventual consistency latency for subsequent -s3-bucket-volume arguments.")
- flag.IntVar(
- &s3Replication,
- "s3-replication",
- 2,
- "Replication level reported to clients for subsequent -s3-bucket-volume arguments.")
- flag.BoolVar(
- &s3UnsafeDelete,
- "s3-unsafe-delete",
- false,
- "EXPERIMENTAL. Enable deletion (garbage collection) even when trash lifetime is zero, even though there are known race conditions that can cause data loss.")
-}
-
// S3Volume implements Volume using an S3 bucket.
type S3Volume struct {
- AccessKeyFile string
- SecretKeyFile string
- Endpoint string
- Region string
- Bucket string
- LocationConstraint bool
- IndexPageSize int
- S3Replication int
- ConnectTimeout arvados.Duration
- ReadTimeout arvados.Duration
- RaceWindow arvados.Duration
- ReadOnly bool
- UnsafeDelete bool
- StorageClasses []string
-
- bucket *s3bucket
-
+ arvados.S3VolumeDriverParameters
+ AuthToken string // populated automatically when IAMRole is used
+ AuthExpiration time.Time // populated automatically when IAMRole is used
+
+ cluster *arvados.Cluster
+ volume arvados.Volume
+ logger logrus.FieldLogger
+ metrics *volumeMetricsVecs
+ bucket *s3bucket
+ region aws.Region
startOnce sync.Once
}
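+
+// The configuration fields referenced throughout this file (AccessKey,
+// SecretKey, Endpoint, Region, Bucket, LocationConstraint, V2Signature,
+// IndexPageSize, ConnectTimeout, ReadTimeout, RaceWindow, UnsafeDelete,
+// IAMRole, ...) are promoted from the embedded
+// arvados.S3VolumeDriverParameters struct.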
-// Examples implements VolumeWithExamples.
-func (*S3Volume) Examples() []Volume {
- return []Volume{
- &S3Volume{
- AccessKeyFile: "/etc/aws_s3_access_key.txt",
- SecretKeyFile: "/etc/aws_s3_secret_key.txt",
- Endpoint: "",
- Region: "us-east-1",
- Bucket: "example-bucket-name",
- IndexPageSize: 1000,
- S3Replication: 2,
- RaceWindow: arvados.Duration(24 * time.Hour),
- ConnectTimeout: arvados.Duration(time.Minute),
- ReadTimeout: arvados.Duration(5 * time.Minute),
- },
- &S3Volume{
- AccessKeyFile: "/etc/gce_s3_access_key.txt",
- SecretKeyFile: "/etc/gce_s3_secret_key.txt",
- Endpoint: "https://storage.googleapis.com",
- Region: "",
- Bucket: "example-bucket-name",
- IndexPageSize: 1000,
- S3Replication: 2,
- RaceWindow: arvados.Duration(24 * time.Hour),
- ConnectTimeout: arvados.Duration(time.Minute),
- ReadTimeout: arvados.Duration(5 * time.Minute),
- },
+// GetDeviceID returns a globally unique ID for the storage bucket.
+func (v *S3Volume) GetDeviceID() string {
+ return "s3://" + v.Endpoint + "/" + v.Bucket
+}
+
+func (v *S3Volume) bootstrapIAMCredentials() error {
+ if v.AccessKey != "" || v.SecretKey != "" {
+ if v.IAMRole != "" {
+ return errors.New("invalid DriverParameters: AccessKey and SecretKey must be blank if IAMRole is specified")
+ }
+ return nil
+ }
+ ttl, err := v.updateIAMCredentials()
+ if err != nil {
+ return err
}
+ go func() {
+ for {
+ time.Sleep(ttl)
+ ttl, err = v.updateIAMCredentials()
+ if err != nil {
+ v.logger.WithError(err).Warnf("failed to update credentials for IAM role %q", v.IAMRole)
+ ttl = time.Second
+ } else if ttl < time.Second {
+ v.logger.WithField("TTL", ttl).Warnf("received stale credentials for IAM role %q", v.IAMRole)
+ ttl = time.Second
+ }
+ }
+ }()
+ return nil
}
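+
+// Note the one-second floor above: a failing metadata service or
+// already-stale credentials degrade to one retry per second instead of a
+// tight refresh loop.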
-// Type implements Volume.
-func (*S3Volume) Type() string {
- return "S3"
+func (v *S3Volume) newS3Client() *s3.S3 {
+ auth := aws.NewAuth(v.AccessKey, v.SecretKey, v.AuthToken, v.AuthExpiration)
+ client := s3.New(*auth, v.region)
+ if !v.V2Signature {
+ client.Signature = aws.V4Signature
+ }
+ client.ConnectTimeout = time.Duration(v.ConnectTimeout)
+ client.ReadTimeout = time.Duration(v.ReadTimeout)
+ return client
}
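+
+// Signature note: V4 signing is the default here; the V2Signature
+// parameter is an escape hatch for S3-compatible services that only
+// implement the legacy V2 scheme.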
-// Start populates private fields and verifies the configuration is
-// valid.
-func (v *S3Volume) Start(m *volumeMetrics) error {
- region, ok := aws.Regions[v.Region]
- if v.Endpoint == "" {
- if !ok {
- return fmt.Errorf("unrecognized region %+q; try specifying -s3-endpoint instead", v.Region)
- }
- } else if ok {
- return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
- "specify empty endpoint (\"-s3-endpoint=\") or use a different region name", v.Region, v.Endpoint)
+// iamCredentials is the data structure returned by the AWS metadata
+// endpoint .../security-credentials/${rolename}.
+type iamCredentials struct {
+ Code string
+ LastUpdated time.Time
+ Type string
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+ Expiration time.Time
+}
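+
+// A credentials document from that endpoint looks roughly like the
+// following (values invented). encoding/json matches field names
+// case-insensitively, so AWS's "AccessKeyId" decodes into AccessKeyID:
+//
+//	{
+//		"Code": "Success",
+//		"LastUpdated": "2019-05-21T15:26:00Z",
+//		"Type": "AWS-HMAC",
+//		"AccessKeyId": "ASIAEXAMPLE",
+//		"SecretAccessKey": "secret",
+//		"Token": "token",
+//		"Expiration": "2019-05-21T21:47:00Z"
+//	}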
+
+// updateIAMCredentials fetches updated credentials and returns their TTL,
+// i.e., the time to sleep until the next update.
+func (v *S3Volume) updateIAMCredentials() (time.Duration, error) {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+ defer cancel()
+
+ metadataBaseURL := "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
+
+ var url string
+ if strings.Contains(v.IAMRole, "://") {
+ // Configuration provides complete URL (used by tests)
+ url = v.IAMRole
+ } else if v.IAMRole != "" {
+		// Configuration provides an IAM role name, so build the
+		// credentials URL from the AWS metadata endpoint.
+ url = metadataBaseURL + v.IAMRole
} else {
- region = aws.Region{
- Name: v.Region,
- S3Endpoint: v.Endpoint,
- S3LocationConstraint: v.LocationConstraint,
+ url = metadataBaseURL
+ v.logger.WithField("URL", url).Debug("looking up IAM role name")
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return 0, fmt.Errorf("error setting up request %s: %s", url, err)
+ }
+ resp, err := http.DefaultClient.Do(req.WithContext(ctx))
+ if err != nil {
+ return 0, fmt.Errorf("error getting %s: %s", url, err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotFound {
+ return 0, fmt.Errorf("this instance does not have an IAM role assigned -- either assign a role, or configure AccessKey and SecretKey explicitly in DriverParameters (error getting %s: HTTP status %s)", url, resp.Status)
+ } else if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
+ }
+ body := bufio.NewReader(resp.Body)
+ var role string
+ _, err = fmt.Fscanf(body, "%s\n", &role)
+ if err != nil {
+ return 0, fmt.Errorf("error reading response from %s: %s", url, err)
+ }
+ if n, _ := body.Read(make([]byte, 64)); n > 0 {
+ v.logger.Warnf("ignoring additional data returned by metadata endpoint %s after the single role name that we expected", url)
}
+ v.logger.WithField("Role", role).Debug("looked up IAM role name")
+ url = url + role
}
- var err error
- var auth aws.Auth
- auth.AccessKey, err = readKeyFromFile(v.AccessKeyFile)
+ v.logger.WithField("URL", url).Debug("getting credentials")
+ req, err := http.NewRequest("GET", url, nil)
if err != nil {
- return err
+ return 0, fmt.Errorf("error setting up request %s: %s", url, err)
}
- auth.SecretKey, err = readKeyFromFile(v.SecretKeyFile)
+ resp, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
- return err
+ return 0, fmt.Errorf("error getting %s: %s", url, err)
}
-
- // Zero timeouts mean "wait forever", which is a bad
- // default. Default to long timeouts instead.
- if v.ConnectTimeout == 0 {
- v.ConnectTimeout = s3DefaultConnectTimeout
- }
- if v.ReadTimeout == 0 {
- v.ReadTimeout = s3DefaultReadTimeout
- }
-
- client := s3.New(auth, region)
- if region.EC2Endpoint.Signer == aws.V4Signature {
- // Currently affects only eu-central-1
- client.Signature = aws.V4Signature
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
}
- client.ConnectTimeout = time.Duration(v.ConnectTimeout)
- client.ReadTimeout = time.Duration(v.ReadTimeout)
- v.bucket = &s3bucket{
- Bucket: &s3.Bucket{
- S3: client,
- Name: v.Bucket,
- },
+ var cred iamCredentials
+ err = json.NewDecoder(resp.Body).Decode(&cred)
+ if err != nil {
+ return 0, fmt.Errorf("error decoding credentials from %s: %s", url, err)
}
- return nil
-}
-
-// DeviceID returns a globally unique ID for the storage bucket.
-func (v *S3Volume) DeviceID() string {
- return "s3://" + v.Endpoint + "/" + v.Bucket
+ v.AccessKey, v.SecretKey, v.AuthToken, v.AuthExpiration = cred.AccessKeyID, cred.SecretAccessKey, cred.Token, cred.Expiration
+ v.bucket.SetBucket(&s3.Bucket{
+ S3: v.newS3Client(),
+ Name: v.Bucket,
+ })
+ // TTL is time from now to expiration, minus 5m. "We make new
+ // credentials available at least five minutes before the
+ // expiration of the old credentials." --
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
+ // (If that's not true, the returned ttl might be zero or
+ // negative, which the caller can handle.)
+ ttl := cred.Expiration.Sub(time.Now()) - 5*time.Minute
+ v.logger.WithFields(logrus.Fields{
+ "AccessKeyID": cred.AccessKeyID,
+ "LastUpdated": cred.LastUpdated,
+ "Expiration": cred.Expiration,
+ "TTL": arvados.Duration(ttl),
+ }).Debug("updated credentials")
+ return ttl, nil
}
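+
+// When only a role name (or nothing) is configured, the lookup above is
+// two HTTP round trips ("example-role" is hypothetical):
+//
+//	GET http://169.254.169.254/latest/meta-data/iam/security-credentials/
+//	        -> example-role
+//	GET http://169.254.169.254/latest/meta-data/iam/security-credentials/example-role
+//	        -> iamCredentials JSON document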
func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io.ReadCloser, err error) {
case <-ready:
return
case <-ctx.Done():
- theConfig.debugLogf("s3: abandoning getReader(): %s", ctx.Err())
+ v.logger.Debugf("s3: abandoning getReader(): %s", ctx.Err())
go func() {
<-ready
if err == nil {
rdr, err = v.bucket.GetReader(loc)
if err != nil {
- log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
+ v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
err = v.translateError(err)
}
return
}()
select {
case <-ctx.Done():
- theConfig.debugLogf("s3: interrupting ReadFull() with Close() because %s", ctx.Err())
+ v.logger.Debugf("s3: interrupting ReadFull() with Close() because %s", ctx.Err())
rdr.Close()
// Must wait for ReadFull to return, to ensure it
// doesn't write to buf after we return.
- theConfig.debugLogf("s3: waiting for ReadFull() to fail")
+ v.logger.Debug("s3: waiting for ReadFull() to fail")
<-ready
return 0, ctx.Err()
case <-ready:
// Put writes a block.
func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
- if v.ReadOnly {
+ if v.volume.ReadOnly {
return MethodDisabledError
}
var opts s3.Options
go func() {
defer func() {
if ctx.Err() != nil {
- theConfig.debugLogf("%s: abandoned PutReader goroutine finished with err: %s", v, err)
+ v.logger.Debugf("abandoned PutReader goroutine finished with err: %s", err)
}
}()
defer close(ready)
}()
select {
case <-ctx.Done():
- theConfig.debugLogf("%s: taking PutReader's input away: %s", v, ctx.Err())
+ v.logger.Debugf("taking PutReader's input away: %s", ctx.Err())
// Our pipe might be stuck in Write(), waiting for
// PutReader() to read. If so, un-stick it. This means
// PutReader will get corrupt data, but that's OK: the
go io.Copy(ioutil.Discard, bufr)
// CloseWithError() will return once pending I/O is done.
bufw.CloseWithError(ctx.Err())
- theConfig.debugLogf("%s: abandoning PutReader goroutine", v)
+	v.logger.Debug("abandoning PutReader goroutine")
return ctx.Err()
case <-ready:
// Unblock pipe in case PutReader did not consume it.
// Touch sets the timestamp for the given locator to the current time.
func (v *S3Volume) Touch(loc string) error {
- if v.ReadOnly {
+ if v.volume.ReadOnly {
return MethodDisabledError
}
_, err := v.bucket.Head(loc, nil)
// The data object X exists, but recent/X is missing.
err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", s3ACL, s3.Options{})
if err != nil {
- log.Printf("error: creating %q: %s", "recent/"+loc, err)
+ v.logger.WithError(err).Errorf("error creating %q", "recent/"+loc)
return zeroTime, v.translateError(err)
}
- log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
+ v.logger.Infof("created %q to migrate existing block to new storage scheme", "recent/"+loc)
resp, err = v.bucket.Head("recent/"+loc, nil)
if err != nil {
- log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
+ v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+loc)
return zeroTime, v.translateError(err)
}
} else if err != nil {
func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
// Use a merge sort to find matching sets of X and recent/X.
dataL := s3Lister{
- Bucket: v.bucket.Bucket,
+ Logger: v.logger,
+ Bucket: v.bucket.Bucket(),
Prefix: prefix,
PageSize: v.IndexPageSize,
Stats: &v.bucket.stats,
}
recentL := s3Lister{
- Bucket: v.bucket.Bucket,
+ Logger: v.logger,
+ Bucket: v.bucket.Bucket(),
Prefix: "recent/" + prefix,
PageSize: v.IndexPageSize,
Stats: &v.bucket.stats,
// Trash a Keep block.
func (v *S3Volume) Trash(loc string) error {
- if v.ReadOnly {
+ if v.volume.ReadOnly {
return MethodDisabledError
}
if t, err := v.Mtime(loc); err != nil {
return err
- } else if time.Since(t) < theConfig.BlobSignatureTTL.Duration() {
+ } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
return nil
}
- if theConfig.TrashLifetime == 0 {
- if !s3UnsafeDelete {
+ if v.cluster.Collections.BlobTrashLifetime == 0 {
+ if !v.UnsafeDelete {
return ErrS3TrashDisabled
}
return v.translateError(v.bucket.Del(loc))
// Can't parse timestamp
return err
}
- safeWindow := t.Add(theConfig.TrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
+ safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow)))
if safeWindow <= 0 {
// We can't count on "touch trash/X" to prolong
// trash/X's lifetime. The new timestamp might not
// (PutCopy returns 200 OK if the request was received, even if the
// copy failed).
func (v *S3Volume) safeCopy(dst, src string) error {
- resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
+ resp, err := v.bucket.Bucket().PutCopy(dst, s3ACL, s3.CopyOptions{
ContentType: "application/octet-stream",
MetadataDirective: "REPLACE",
- }, v.bucket.Name+"/"+src)
+ }, v.bucket.Bucket().Name+"/"+src)
err = v.translateError(err)
if os.IsNotExist(err) {
return err
} else if err != nil {
- return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.Name+"/"+src, err)
+ return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.Bucket().Name+"/"+src, err)
}
if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
}
-// Writable returns false if all future Put, Mtime, and Delete calls
-// are expected to fail.
-func (v *S3Volume) Writable() bool {
- return !v.ReadOnly
-}
-
-// Replication returns the storage redundancy of the underlying
-// device. Configured via command line flag.
-func (v *S3Volume) Replication() int {
- return v.S3Replication
-}
-
-// GetStorageClasses implements Volume
-func (v *S3Volume) GetStorageClasses() []string {
- return v.StorageClasses
-}
-
var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
func (v *S3Volume) isKeepBlock(s string) bool {
trash, err := v.bucket.Head("trash/"+loc, nil)
if err != nil {
if !os.IsNotExist(v.translateError(err)) {
- log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+loc)
}
return false
}
trashTime, err := v.lastModified(trash)
if err != nil {
- log.Printf("error: fixRace: parse %q: %s", trash.Header.Get("Last-Modified"), err)
+ v.logger.WithError(err).Errorf("fixRace: error parsing time %q", trash.Header.Get("Last-Modified"))
return false
}
recent, err := v.bucket.Head("recent/"+loc, nil)
if err != nil {
- log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
+ v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+loc)
return false
}
recentTime, err := v.lastModified(recent)
if err != nil {
- log.Printf("error: fixRace: parse %q: %s", recent.Header.Get("Last-Modified"), err)
+ v.logger.WithError(err).Errorf("fixRace: error parsing time %q", recent.Header.Get("Last-Modified"))
return false
}
ageWhenTrashed := trashTime.Sub(recentTime)
- if ageWhenTrashed >= theConfig.BlobSignatureTTL.Duration() {
+ if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() {
// No evidence of a race: block hasn't been written
// since it became eligible for Trash. No fix needed.
return false
}
- log.Printf("notice: fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, theConfig.BlobSignatureTTL)
- log.Printf("notice: fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
+ v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL)
+ v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
err = v.safeCopy(loc, "trash/"+loc)
if err != nil {
- log.Printf("error: fixRace: %s", err)
+ v.logger.WithError(err).Error("fixRace: copy failed")
return false
}
return true
return err
}
-// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
+// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
// and deletes them from the volume.
func (v *S3Volume) EmptyTrash() {
+ if v.cluster.Collections.BlobDeleteConcurrency < 1 {
+ return
+ }
+
var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
// Define "ready to delete" as "...when EmptyTrash started".
trashT, err := time.Parse(time.RFC3339, trash.LastModified)
if err != nil {
- log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
+ v.logger.Warnf("EmptyTrash: %q: parse %q: %s", trash.Key, trash.LastModified, err)
return
}
recent, err := v.bucket.Head("recent/"+loc, nil)
if err != nil && os.IsNotExist(v.translateError(err)) {
- log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
+ v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", trash.Key, "recent/"+loc, err)
err = v.Untrash(loc)
if err != nil {
- log.Printf("error: %s: EmptyTrash: Untrash(%q): %s", v, loc, err)
+ v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
}
return
} else if err != nil {
- log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+loc)
return
}
recentT, err := v.lastModified(recent)
if err != nil {
- log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, "recent/"+loc, recent.Header.Get("Last-Modified"), err)
+ v.logger.WithError(err).Warnf("EmptyTrash: %q: error parsing %q", "recent/"+loc, recent.Header.Get("Last-Modified"))
return
}
- if trashT.Sub(recentT) < theConfig.BlobSignatureTTL.Duration() {
- if age := startT.Sub(recentT); age >= theConfig.BlobSignatureTTL.Duration()-time.Duration(v.RaceWindow) {
+ if trashT.Sub(recentT) < v.cluster.Collections.BlobSigningTTL.Duration() {
+ if age := startT.Sub(recentT); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
// recent/loc is too old to protect
// loc from being Trashed again during
// the raceWindow that starts if we
// delete trash/X now.
//
- // Note this means (TrashCheckInterval
- // < BlobSignatureTTL - raceWindow) is
+ // Note this means (TrashSweepInterval
+ // < BlobSigningTTL - raceWindow) is
// necessary to avoid starvation.
- log.Printf("notice: %s: EmptyTrash: detected old race for %q, calling fixRace + Touch", v, loc)
+ v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
v.fixRace(loc)
v.Touch(loc)
return
}
_, err := v.bucket.Head(loc, nil)
if os.IsNotExist(err) {
- log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
+ v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc)
v.fixRace(loc)
return
} else if err != nil {
- log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
return
}
}
- if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
+ if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
return
}
err = v.bucket.Del(trash.Key)
if err != nil {
- log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
+ v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", trash.Key)
return
}
atomic.AddInt64(&bytesDeleted, trash.Size)
_, err = v.bucket.Head(loc, nil)
if err == nil {
- log.Printf("warning: %s: EmptyTrash: HEAD %q succeeded immediately after deleting %q", v, loc, loc)
+ v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
return
}
if !os.IsNotExist(v.translateError(err)) {
- log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
+ v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", loc)
return
}
err = v.bucket.Del("recent/" + loc)
if err != nil {
- log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
+ v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+loc)
}
}
var wg sync.WaitGroup
- todo := make(chan *s3.Key, theConfig.EmptyTrashWorkers)
- for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
+ todo := make(chan *s3.Key, v.cluster.Collections.BlobDeleteConcurrency)
+ for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
wg.Add(1)
go func() {
defer wg.Done()
}
trashL := s3Lister{
- Bucket: v.bucket.Bucket,
+ Logger: v.logger,
+ Bucket: v.bucket.Bucket(),
Prefix: "trash/",
PageSize: v.IndexPageSize,
Stats: &v.bucket.stats,
wg.Wait()
if err := trashL.Error(); err != nil {
- log.Printf("error: %s: EmptyTrash: lister: %s", v, err)
+ v.logger.WithError(err).Error("EmptyTrash: lister failed")
}
- log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+ v.logger.Infof("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
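+
+// The concurrency above is the usual channel-fed worker pool; the same
+// pattern in isolation, with hypothetical concurrency/keys/process:
+//
+//	todo := make(chan *s3.Key, concurrency)
+//	var wg sync.WaitGroup
+//	for i := 0; i < concurrency; i++ {
+//		wg.Add(1)
+//		go func() {
+//			defer wg.Done()
+//			for key := range todo {
+//				process(key)
+//			}
+//		}()
+//	}
+//	for _, key := range keys {
+//		todo <- key
+//	}
+//	close(todo)
+//	wg.Wait()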
type s3Lister struct {
+ Logger logrus.FieldLogger
Bucket *s3.Bucket
Prefix string
PageSize int
	Stats *s3bucketStats
	nextMarker string
	buf []s3.Key
	err error
}
func (lister *s3Lister) getPage() {
+ lister.Stats.TickOps("list")
lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps)
resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize)
lister.nextMarker = ""
lister.buf = make([]s3.Key, 0, len(resp.Contents))
for _, key := range resp.Contents {
if !strings.HasPrefix(key.Key, lister.Prefix) {
- log.Printf("warning: s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
+ lister.Logger.Warnf("s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
continue
}
lister.buf = append(lister.buf, key)
return
}
-// s3bucket wraps s3.bucket and counts I/O and API usage stats.
+// s3bucket wraps s3.bucket and counts I/O and API usage stats. The
+// wrapped bucket can be replaced atomically with SetBucket in order
+// to update credentials.
type s3bucket struct {
- *s3.Bucket
- stats s3bucketStats
+ bucket *s3.Bucket
+ stats s3bucketStats
+ mu sync.Mutex
+}
+
+func (b *s3bucket) Bucket() *s3.Bucket {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.bucket
+}
+
+func (b *s3bucket) SetBucket(bucket *s3.Bucket) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.bucket = bucket
}
func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
- rdr, err := b.Bucket.GetReader(path)
+ rdr, err := b.Bucket().GetReader(path)
+ b.stats.TickOps("get")
b.stats.Tick(&b.stats.Ops, &b.stats.GetOps)
b.stats.TickErr(err)
return NewCountingReader(rdr, b.stats.TickInBytes), err
}
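+
+// Each of these wrappers follows the same shape: call through Bucket(),
+// count the operation with TickOps and Tick, then classify the result with
+// TickErr, keeping the prometheus counters registered in check() in step
+// with actual S3 traffic.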
func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
- resp, err := b.Bucket.Head(path, headers)
+ resp, err := b.Bucket().Head(path, headers)
+ b.stats.TickOps("head")
b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps)
b.stats.TickErr(err)
return resp, err
} else {
r = NewCountingReader(r, b.stats.TickOutBytes)
}
- err := b.Bucket.PutReader(path, r, length, contType, perm, options)
+ err := b.Bucket().PutReader(path, r, length, contType, perm, options)
+ b.stats.TickOps("put")
b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
b.stats.TickErr(err)
return err
}
func (b *s3bucket) Del(path string) error {
- err := b.Bucket.Del(path)
+ err := b.Bucket().Del(path)
+ b.stats.TickOps("delete")
b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
b.stats.TickErr(err)
return err