X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/0515919a3da565c7bf5087eca38f47bc4422c260..62168c2db5c36de2362cd1d5785b598b187bbef3:/services/keepstore/s3_volume.go diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go index 7873764004..dc857c3264 100644 --- a/services/keepstore/s3_volume.go +++ b/services/keepstore/s3_volume.go @@ -5,18 +5,14 @@ package keepstore import ( - "bufio" "bytes" "context" - "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "io" - "io/ioutil" - "net/http" "os" "regexp" "strings" @@ -25,821 +21,246 @@ import ( "time" "git.arvados.org/arvados.git/sdk/go/arvados" - "github.com/AdRoll/goamz/aws" - "github.com/AdRoll/goamz/s3" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/awserr" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + "github.com/aws/aws-sdk-go-v2/aws/ec2metadata" + "github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/aws/endpoints" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/s3manager" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" ) func init() { - driver["S3"] = chooseS3VolumeDriver -} - -func newS3Volume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) { - v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics} - err := json.Unmarshal(volume.DriverParameters, v) - if err != nil { - return nil, err - } - v.logger = logger.WithField("Volume", v.String()) - return v, v.check() -} - -func (v *S3Volume) check() error { - if v.Bucket == "" { - return errors.New("DriverParameters: Bucket must be provided") - } - if v.IndexPageSize == 0 { - v.IndexPageSize = 1000 - } - if v.RaceWindow < 0 { - return errors.New("DriverParameters: RaceWindow must not be negative") - } - - if v.Endpoint == "" { - r, ok := aws.Regions[v.Region] - if !ok { - return fmt.Errorf("unrecognized region %+q; try specifying endpoint instead", v.Region) - } - v.region = r - } else { - v.region = aws.Region{ - Name: v.Region, - S3Endpoint: v.Endpoint, - S3LocationConstraint: v.LocationConstraint, - } - } - - // Zero timeouts mean "wait forever", which is a bad - // default. Default to long timeouts instead. - if v.ConnectTimeout == 0 { - v.ConnectTimeout = s3DefaultConnectTimeout - } - if v.ReadTimeout == 0 { - v.ReadTimeout = s3DefaultReadTimeout - } - - v.bucket = &s3bucket{ - bucket: &s3.Bucket{ - S3: v.newS3Client(), - Name: v.Bucket, - }, - } - // Set up prometheus metrics - lbls := prometheus.Labels{"device_id": v.GetDeviceID()} - v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls) - - err := v.bootstrapIAMCredentials() - if err != nil { - return fmt.Errorf("error getting IAM credentials: %s", err) - } - - return nil + driver["S3"] = news3Volume } const ( - s3DefaultReadTimeout = arvados.Duration(10 * time.Minute) - s3DefaultConnectTimeout = arvados.Duration(time.Minute) + s3DefaultReadTimeout = arvados.Duration(10 * time.Minute) + s3DefaultConnectTimeout = arvados.Duration(time.Minute) + maxClockSkew = 600 * time.Second + nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT" + s3downloaderPartSize = 6 * 1024 * 1024 + s3downloaderReadConcurrency = 11 + s3uploaderPartSize = 5 * 1024 * 1024 + s3uploaderWriteConcurrency = 5 ) var ( - // ErrS3TrashDisabled is returned by Trash if that operation - // is impossible with the current config. 
- ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false") - - s3ACL = s3.Private - - zeroTime time.Time + errS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false") + s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`) + s3AWSZeroTime time.Time ) -const ( - maxClockSkew = 600 * time.Second - nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT" -) - -func s3regions() (okList []string) { - for r := range aws.Regions { - okList = append(okList, r) - } - return -} - -// S3Volume implements Volume using an S3 bucket. -type S3Volume struct { +// s3Volume implements Volume using an S3 bucket. +type s3Volume struct { arvados.S3VolumeDriverParameters AuthToken string // populated automatically when IAMRole is used AuthExpiration time.Time // populated automatically when IAMRole is used - cluster *arvados.Cluster - volume arvados.Volume - logger logrus.FieldLogger - metrics *volumeMetricsVecs - bucket *s3bucket - region aws.Region - startOnce sync.Once -} - -// GetDeviceID returns a globally unique ID for the storage bucket. -func (v *S3Volume) GetDeviceID() string { - return "s3://" + v.Endpoint + "/" + v.Bucket + cluster *arvados.Cluster + volume arvados.Volume + logger logrus.FieldLogger + metrics *volumeMetricsVecs + bufferPool *bufferPool + bucket *s3Bucket + region string + startOnce sync.Once } -func (v *S3Volume) bootstrapIAMCredentials() error { - if v.AccessKeyID != "" || v.SecretAccessKey != "" { - if v.IAMRole != "" { - return errors.New("invalid DriverParameters: AccessKeyID and SecretAccessKey must be blank if IAMRole is specified") - } - return nil - } - ttl, err := v.updateIAMCredentials() - if err != nil { - return err - } - go func() { - for { - time.Sleep(ttl) - ttl, err = v.updateIAMCredentials() - if err != nil { - v.logger.WithError(err).Warnf("failed to update credentials for IAM role %q", v.IAMRole) - ttl = time.Second - } else if ttl < time.Second { - v.logger.WithField("TTL", ttl).Warnf("received stale credentials for IAM role %q", v.IAMRole) - ttl = time.Second - } - } - }() - return nil +// s3bucket wraps s3.bucket and counts I/O and API usage stats. The +// wrapped bucket can be replaced atomically with SetBucket in order +// to update credentials. +type s3Bucket struct { + bucket string + svc *s3.Client + stats s3awsbucketStats + mu sync.Mutex } -func (v *S3Volume) newS3Client() *s3.S3 { - auth := aws.NewAuth(v.AccessKeyID, v.SecretAccessKey, v.AuthToken, v.AuthExpiration) - client := s3.New(*auth, v.region) - if !v.V2Signature { - client.Signature = aws.V4Signature +func (v *s3Volume) isKeepBlock(s string) (string, bool) { + if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] { + s = s[v.PrefixLength+1:] } - client.ConnectTimeout = time.Duration(v.ConnectTimeout) - client.ReadTimeout = time.Duration(v.ReadTimeout) - return client + return s, s3AWSKeepBlockRegexp.MatchString(s) } -// returned by AWS metadata endpoint .../security-credentials/${rolename} -type iamCredentials struct { - Code string - LastUpdated time.Time - Type string - AccessKeyID string - SecretAccessKey string - Token string - Expiration time.Time -} - -// Returns TTL of updated credentials, i.e., time to sleep until next -// update. 
-func (v *S3Volume) updateIAMCredentials() (time.Duration, error) { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute)) - defer cancel() - - metadataBaseURL := "http://169.254.169.254/latest/meta-data/iam/security-credentials/" - - var url string - if strings.Contains(v.IAMRole, "://") { - // Configuration provides complete URL (used by tests) - url = v.IAMRole - } else if v.IAMRole != "" { - // Configuration provides IAM role name and we use the - // AWS metadata endpoint - url = metadataBaseURL + v.IAMRole +// Return the key used for a given loc. If PrefixLength==0 then +// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is +// "abc/abcdef0123", etc. +func (v *s3Volume) key(loc string) string { + if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 { + return loc[:v.PrefixLength] + "/" + loc } else { - url = metadataBaseURL - v.logger.WithField("URL", url).Debug("looking up IAM role name") - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return 0, fmt.Errorf("error setting up request %s: %s", url, err) - } - resp, err := http.DefaultClient.Do(req.WithContext(ctx)) - if err != nil { - return 0, fmt.Errorf("error getting %s: %s", url, err) - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotFound { - return 0, fmt.Errorf("this instance does not have an IAM role assigned -- either assign a role, or configure AccessKeyID and SecretAccessKey explicitly in DriverParameters (error getting %s: HTTP status %s)", url, resp.Status) - } else if resp.StatusCode != http.StatusOK { - return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status) - } - body := bufio.NewReader(resp.Body) - var role string - _, err = fmt.Fscanf(body, "%s\n", &role) - if err != nil { - return 0, fmt.Errorf("error reading response from %s: %s", url, err) - } - if n, _ := body.Read(make([]byte, 64)); n > 0 { - v.logger.Warnf("ignoring additional data returned by metadata endpoint %s after the single role name that we expected", url) - } - v.logger.WithField("Role", role).Debug("looked up IAM role name") - url = url + role - } - - v.logger.WithField("URL", url).Debug("getting credentials") - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return 0, fmt.Errorf("error setting up request %s: %s", url, err) - } - resp, err := http.DefaultClient.Do(req.WithContext(ctx)) - if err != nil { - return 0, fmt.Errorf("error getting %s: %s", url, err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status) - } - var cred iamCredentials - err = json.NewDecoder(resp.Body).Decode(&cred) - if err != nil { - return 0, fmt.Errorf("error decoding credentials from %s: %s", url, err) - } - v.AccessKeyID, v.SecretAccessKey, v.AuthToken, v.AuthExpiration = cred.AccessKeyID, cred.SecretAccessKey, cred.Token, cred.Expiration - v.bucket.SetBucket(&s3.Bucket{ - S3: v.newS3Client(), - Name: v.Bucket, - }) - // TTL is time from now to expiration, minus 5m. "We make new - // credentials available at least five minutes before the - // expiration of the old credentials." -- - // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials - // (If that's not true, the returned ttl might be zero or - // negative, which the caller can handle.) 
- ttl := cred.Expiration.Sub(time.Now()) - 5*time.Minute - v.logger.WithFields(logrus.Fields{ - "AccessKeyID": cred.AccessKeyID, - "LastUpdated": cred.LastUpdated, - "Expiration": cred.Expiration, - "TTL": arvados.Duration(ttl), - }).Debug("updated credentials") - return ttl, nil -} - -func (v *S3Volume) getReaderWithContext(ctx context.Context, key string) (rdr io.ReadCloser, err error) { - ready := make(chan bool) - go func() { - rdr, err = v.getReader(key) - close(ready) - }() - select { - case <-ready: - return - case <-ctx.Done(): - v.logger.Debugf("s3: abandoning getReader(%s): %s", key, ctx.Err()) - go func() { - <-ready - if err == nil { - rdr.Close() - } - }() - return nil, ctx.Err() + return loc } } -// getReader wraps (Bucket)GetReader. -// -// In situations where (Bucket)GetReader would fail because the block -// disappeared in a Trash race, getReader calls fixRace to recover the -// data, and tries again. -func (v *S3Volume) getReader(key string) (rdr io.ReadCloser, err error) { - rdr, err = v.bucket.GetReader(key) - err = v.translateError(err) - if err == nil || !os.IsNotExist(err) { - return +func news3Volume(params newVolumeParams) (volume, error) { + v := &s3Volume{ + cluster: params.Cluster, + volume: params.ConfigVolume, + metrics: params.MetricsVecs, + bufferPool: params.BufferPool, } - - _, err = v.bucket.Head("recent/"+key, nil) - err = v.translateError(err) + err := json.Unmarshal(params.ConfigVolume.DriverParameters, v) if err != nil { - // If we can't read recent/X, there's no point in - // trying fixRace. Give up. - return - } - if !v.fixRace(key) { - err = os.ErrNotExist - return - } - - rdr, err = v.bucket.GetReader(key) - if err != nil { - v.logger.Warnf("reading %s after successful fixRace: %s", key, err) - err = v.translateError(err) + return nil, err } - return + v.logger = params.Logger.WithField("Volume", v.DeviceID()) + return v, v.check("") } -// Get a block: copy the block data into buf, and return the number of -// bytes copied. -func (v *S3Volume) Get(ctx context.Context, loc string, buf []byte) (int, error) { - key := v.key(loc) - rdr, err := v.getReaderWithContext(ctx, key) - if err != nil { - return 0, err - } - - var n int - ready := make(chan bool) - go func() { - defer close(ready) - - defer rdr.Close() - n, err = io.ReadFull(rdr, buf) - - switch err { - case nil, io.EOF, io.ErrUnexpectedEOF: - err = nil - default: - err = v.translateError(err) +func (v *s3Volume) translateError(err error) error { + if _, ok := err.(*aws.RequestCanceledError); ok { + return context.Canceled + } else if aerr, ok := err.(awserr.Error); ok { + if aerr.Code() == "NotFound" { + return os.ErrNotExist + } else if aerr.Code() == "NoSuchKey" { + return os.ErrNotExist } - }() - select { - case <-ctx.Done(): - v.logger.Debugf("s3: interrupting ReadFull() with Close() because %s", ctx.Err()) - rdr.Close() - // Must wait for ReadFull to return, to ensure it - // doesn't write to buf after we return. - v.logger.Debug("s3: waiting for ReadFull() to fail") - <-ready - return 0, ctx.Err() - case <-ready: - return n, err - } -} - -// Compare the given data with the stored data. 
-func (v *S3Volume) Compare(ctx context.Context, loc string, expect []byte) error { - key := v.key(loc) - errChan := make(chan error, 1) - go func() { - _, err := v.bucket.Head("recent/"+key, nil) - errChan <- err - }() - var err error - select { - case <-ctx.Done(): - return ctx.Err() - case err = <-errChan: - } - if err != nil { - // Checking for "loc" itself here would interfere with - // future GET requests. - // - // On AWS, if X doesn't exist, a HEAD or GET request - // for X causes X's non-existence to be cached. Thus, - // if we test for X, then create X and return a - // signature to our client, the client might still get - // 404 from all keepstores when trying to read it. - // - // To avoid this, we avoid doing HEAD X or GET X until - // we know X has been written. - // - // Note that X might exist even though recent/X - // doesn't: for example, the response to HEAD recent/X - // might itself come from a stale cache. In such - // cases, we will return a false negative and - // PutHandler might needlessly create another replica - // on a different volume. That's not ideal, but it's - // better than passing the eventually-consistent - // problem on to our clients. - return v.translateError(err) } - rdr, err := v.getReaderWithContext(ctx, key) - if err != nil { - return err - } - defer rdr.Close() - return v.translateError(compareReaderWithBuf(ctx, rdr, expect, loc[:32])) + return err } -// Put writes a block. -func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error { - if v.volume.ReadOnly { - return MethodDisabledError - } - var opts s3.Options - size := len(block) - if size > 0 { - md5, err := hex.DecodeString(loc) - if err != nil { - return err - } - opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5) - // In AWS regions that use V4 signatures, we need to - // provide ContentSHA256 up front. Otherwise, the S3 - // library reads the request body (from our buffer) - // into another new buffer in order to compute the - // SHA256 before sending the request -- which would - // mean consuming 128 MiB of memory for the duration - // of a 64 MiB write. - opts.ContentSHA256 = fmt.Sprintf("%x", sha256.Sum256(block)) - } - - key := v.key(loc) - - // Send the block data through a pipe, so that (if we need to) - // we can close the pipe early and abandon our PutReader() - // goroutine, without worrying about PutReader() accessing our - // block buffer after we release it. - bufr, bufw := io.Pipe() - go func() { - io.Copy(bufw, bytes.NewReader(block)) - bufw.Close() - }() - - var err error - ready := make(chan bool) - go func() { - defer func() { - if ctx.Err() != nil { - v.logger.Debugf("abandoned PutReader goroutine finished with err: %s", err) - } - }() - defer close(ready) - err = v.bucket.PutReader(key, bufr, int64(size), "application/octet-stream", s3ACL, opts) - if err != nil { - return - } - err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{}) - }() - select { - case <-ctx.Done(): - v.logger.Debugf("taking PutReader's input away: %s", ctx.Err()) - // Our pipe might be stuck in Write(), waiting for - // PutReader() to read. If so, un-stick it. This means - // PutReader will get corrupt data, but that's OK: the - // size and MD5 won't match, so the write will fail. - go io.Copy(ioutil.Discard, bufr) - // CloseWithError() will return once pending I/O is done. 
- bufw.CloseWithError(ctx.Err()) - v.logger.Debugf("abandoning PutReader goroutine") - return ctx.Err() - case <-ready: - // Unblock pipe in case PutReader did not consume it. - io.Copy(ioutil.Discard, bufr) - return v.translateError(err) +// safeCopy calls CopyObjectRequest, and checks the response to make +// sure the copy succeeded and updated the timestamp on the +// destination object +// +// (If something goes wrong during the copy, the error will be +// embedded in the 200 OK response) +func (v *s3Volume) safeCopy(dst, src string) error { + input := &s3.CopyObjectInput{ + Bucket: aws.String(v.bucket.bucket), + ContentType: aws.String("application/octet-stream"), + CopySource: aws.String(v.bucket.bucket + "/" + src), + Key: aws.String(dst), } -} -// Touch sets the timestamp for the given locator to the current time. -func (v *S3Volume) Touch(loc string) error { - if v.volume.ReadOnly { - return MethodDisabledError - } - key := v.key(loc) - _, err := v.bucket.Head(key, nil) - err = v.translateError(err) - if os.IsNotExist(err) && v.fixRace(key) { - // The data object got trashed in a race, but fixRace - // rescued it. - } else if err != nil { - return err - } - err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{}) - return v.translateError(err) -} + req := v.bucket.svc.CopyObjectRequest(input) + resp, err := req.Send(context.Background()) -// Mtime returns the stored timestamp for the given locator. -func (v *S3Volume) Mtime(loc string) (time.Time, error) { - key := v.key(loc) - _, err := v.bucket.Head(key, nil) - if err != nil { - return zeroTime, v.translateError(err) - } - resp, err := v.bucket.Head("recent/"+key, nil) err = v.translateError(err) if os.IsNotExist(err) { - // The data object X exists, but recent/X is missing. - err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{}) - if err != nil { - v.logger.WithError(err).Errorf("error creating %q", "recent/"+key) - return zeroTime, v.translateError(err) - } - v.logger.Infof("created %q to migrate existing block to new storage scheme", "recent/"+key) - resp, err = v.bucket.Head("recent/"+key, nil) - if err != nil { - v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key) - return zeroTime, v.translateError(err) - } - } else if err != nil { - // HEAD recent/X failed for some other reason. - return zeroTime, err - } - return v.lastModified(resp) -} - -// IndexTo writes a complete list of locators with the given prefix -// for which Get() can retrieve data. -func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error { - // Use a merge sort to find matching sets of X and recent/X. - dataL := s3Lister{ - Logger: v.logger, - Bucket: v.bucket.Bucket(), - Prefix: v.key(prefix), - PageSize: v.IndexPageSize, - Stats: &v.bucket.stats, - } - recentL := s3Lister{ - Logger: v.logger, - Bucket: v.bucket.Bucket(), - Prefix: "recent/" + v.key(prefix), - PageSize: v.IndexPageSize, - Stats: &v.bucket.stats, - } - for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() { - if data.Key >= "g" { - // Conveniently, "recent/*" and "trash/*" are - // lexically greater than all hex-encoded data - // hashes, so stopping here avoids iterating - // over all of them needlessly with dataL. 
- break - } - loc, isBlk := v.isKeepBlock(data.Key) - if !isBlk { - continue - } - - // stamp is the list entry we should use to report the - // last-modified time for this data block: it will be - // the recent/X entry if one exists, otherwise the - // entry for the data block itself. - stamp := data - - // Advance to the corresponding recent/X marker, if any - for recent != nil && recentL.Error() == nil { - if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 { - recent = recentL.Next() - continue - } else if cmp == 0 { - stamp = recent - recent = recentL.Next() - break - } else { - // recent/X marker is missing: we'll - // use the timestamp on the data - // object. - break - } - } - if err := recentL.Error(); err != nil { - return err - } - t, err := time.Parse(time.RFC3339, stamp.LastModified) - if err != nil { - return err - } - // We truncate sub-second precision here. Otherwise - // timestamps will never match the RFC1123-formatted - // Last-Modified values parsed by Mtime(). - fmt.Fprintf(writer, "%s+%d %d\n", loc, data.Size, t.Unix()*1000000000) - } - return dataL.Error() -} - -// Trash a Keep block. -func (v *S3Volume) Trash(loc string) error { - if v.volume.ReadOnly { - return MethodDisabledError - } - if t, err := v.Mtime(loc); err != nil { - return err - } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() { - return nil - } - key := v.key(loc) - if v.cluster.Collections.BlobTrashLifetime == 0 { - if !v.UnsafeDelete { - return ErrS3TrashDisabled - } - return v.translateError(v.bucket.Del(key)) - } - err := v.checkRaceWindow(key) - if err != nil { - return err - } - err = v.safeCopy("trash/"+key, key) - if err != nil { return err - } - return v.translateError(v.bucket.Del(key)) -} - -// checkRaceWindow returns a non-nil error if trash/key is, or might -// be, in the race window (i.e., it's not safe to trash key). -func (v *S3Volume) checkRaceWindow(key string) error { - resp, err := v.bucket.Head("trash/"+key, nil) - err = v.translateError(err) - if os.IsNotExist(err) { - // OK, trash/X doesn't exist so we're not in the race - // window - return nil } else if err != nil { - // Error looking up trash/X. We don't know whether - // we're in the race window - return err - } - t, err := v.lastModified(resp) - if err != nil { - // Can't parse timestamp - return err - } - safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow))) - if safeWindow <= 0 { - // We can't count on "touch trash/X" to prolong - // trash/X's lifetime. The new timestamp might not - // become visible until now+raceWindow, and EmptyTrash - // is allowed to delete trash/X before then. - return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow) + return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.bucket+"/"+src, err) } - // trash/X exists, but it won't be eligible for deletion until - // after now+raceWindow, so it's safe to overwrite it. - return nil -} -// safeCopy calls PutCopy, and checks the response to make sure the -// copy succeeded and updated the timestamp on the destination object -// (PutCopy returns 200 OK if the request was received, even if the -// copy failed). 
-func (v *S3Volume) safeCopy(dst, src string) error { - resp, err := v.bucket.Bucket().PutCopy(dst, s3ACL, s3.CopyOptions{ - ContentType: "application/octet-stream", - MetadataDirective: "REPLACE", - }, v.bucket.Bucket().Name+"/"+src) - err = v.translateError(err) - if os.IsNotExist(err) { - return err - } else if err != nil { - return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.Bucket().Name+"/"+src, err) - } - if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil { - return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err) - } else if time.Now().Sub(t) > maxClockSkew { - return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.LastModified, t) + if resp.CopyObjectResult.LastModified == nil { + return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.CopyObjectResult.LastModified, err) + } else if time.Now().Sub(*resp.CopyObjectResult.LastModified) > maxClockSkew { + return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.CopyObjectResult.LastModified, resp.CopyObjectResult.LastModified) } return nil } -// Get the LastModified header from resp, and parse it as RFC1123 or -// -- if it isn't valid RFC1123 -- as Amazon's variant of RFC1123. -func (v *S3Volume) lastModified(resp *http.Response) (t time.Time, err error) { - s := resp.Header.Get("Last-Modified") - t, err = time.Parse(time.RFC1123, s) - if err != nil && s != "" { - // AWS example is "Sun, 1 Jan 2006 12:00:00 GMT", - // which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT" - // as required by HTTP spec. If it's not a valid HTTP - // header value, it's probably AWS (or s3test) giving - // us a nearly-RFC1123 timestamp. - t, err = time.Parse(nearlyRFC1123, s) +func (v *s3Volume) check(ec2metadataHostname string) error { + if v.Bucket == "" { + return errors.New("DriverParameters: Bucket must be provided") } - return -} - -// Untrash moves block from trash back into store -func (v *S3Volume) Untrash(loc string) error { - key := v.key(loc) - err := v.safeCopy(key, "trash/"+key) - if err != nil { - return err + if v.IndexPageSize == 0 { + v.IndexPageSize = 1000 } - err = v.bucket.PutReader("recent/"+key, nil, 0, "application/octet-stream", s3ACL, s3.Options{}) - return v.translateError(err) -} - -// Status returns a *VolumeStatus representing the current in-use -// storage capacity and a fake available capacity that doesn't make -// the volume seem full or nearly-full. -func (v *S3Volume) Status() *VolumeStatus { - return &VolumeStatus{ - DeviceNum: 1, - BytesFree: BlockSize * 1000, - BytesUsed: 1, + if v.RaceWindow < 0 { + return errors.New("DriverParameters: RaceWindow must not be negative") } -} - -// InternalStats returns bucket I/O and API call counters. -func (v *S3Volume) InternalStats() interface{} { - return &v.bucket.stats -} - -// String implements fmt.Stringer. -func (v *S3Volume) String() string { - return fmt.Sprintf("s3-bucket:%+q", v.Bucket) -} - -var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`) -func (v *S3Volume) isKeepBlock(s string) (string, bool) { - if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] { - s = s[v.PrefixLength+1:] + if v.V2Signature { + return errors.New("DriverParameters: V2Signature is not supported") } - return s, s3KeepBlockRegexp.MatchString(s) -} -// Return the key used for a given loc. 
If PrefixLength==0 then -// key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is -// "abc/abcdef0123", etc. -func (v *S3Volume) key(loc string) string { - if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 { - return loc[:v.PrefixLength] + "/" + loc - } else { - return loc - } -} + defaultResolver := endpoints.NewDefaultResolver() -// fixRace(X) is called when "recent/X" exists but "X" doesn't -// exist. If the timestamps on "recent/X" and "trash/X" indicate there -// was a race between Put and Trash, fixRace recovers from the race by -// Untrashing the block. -func (v *S3Volume) fixRace(key string) bool { - trash, err := v.bucket.Head("trash/"+key, nil) - if err != nil { - if !os.IsNotExist(v.translateError(err)) { - v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key) + cfg := defaults.Config() + + if v.Endpoint == "" && v.Region == "" { + return fmt.Errorf("AWS region or endpoint must be specified") + } else if v.Endpoint != "" || ec2metadataHostname != "" { + myCustomResolver := func(service, region string) (aws.Endpoint, error) { + if v.Endpoint != "" && service == "s3" { + return aws.Endpoint{ + URL: v.Endpoint, + SigningRegion: region, + }, nil + } else if service == "ec2metadata" && ec2metadataHostname != "" { + return aws.Endpoint{ + URL: ec2metadataHostname, + }, nil + } else { + return defaultResolver.ResolveEndpoint(service, region) + } } - return false + cfg.EndpointResolver = aws.EndpointResolverFunc(myCustomResolver) } - trashTime, err := v.lastModified(trash) - if err != nil { - v.logger.WithError(err).Errorf("fixRace: error parsing time %q", trash.Header.Get("Last-Modified")) - return false + if v.Region == "" { + // Endpoint is already specified (otherwise we would + // have errored out above), but Region is also + // required by the aws sdk, in order to determine + // SignatureVersions. + v.Region = "us-east-1" } + cfg.Region = v.Region - recent, err := v.bucket.Head("recent/"+key, nil) - if err != nil { - v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key) - return false + // Zero timeouts mean "wait forever", which is a bad + // default. Default to long timeouts instead. + if v.ConnectTimeout == 0 { + v.ConnectTimeout = s3DefaultConnectTimeout } - recentTime, err := v.lastModified(recent) - if err != nil { - v.logger.WithError(err).Errorf("fixRace: error parsing time %q", recent.Header.Get("Last-Modified")) - return false + if v.ReadTimeout == 0 { + v.ReadTimeout = s3DefaultReadTimeout } - ageWhenTrashed := trashTime.Sub(recentTime) - if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() { - // No evidence of a race: block hasn't been written - // since it became eligible for Trash. No fix needed. 
- return false - } + creds := aws.NewChainProvider( + []aws.CredentialsProvider{ + aws.NewStaticCredentialsProvider(v.AccessKeyID, v.SecretAccessKey, v.AuthToken), + ec2rolecreds.New(ec2metadata.New(cfg)), + }) - v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL) - v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key) - err = v.safeCopy(key, "trash/"+key) - if err != nil { - v.logger.WithError(err).Error("fixRace: copy failed") - return false + cfg.Credentials = creds + + v.bucket = &s3Bucket{ + bucket: v.Bucket, + svc: s3.New(cfg), } - return true + + // Set up prometheus metrics + lbls := prometheus.Labels{"device_id": v.DeviceID()} + v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls) + + return nil } -func (v *S3Volume) translateError(err error) error { - switch err := err.(type) { - case *s3.Error: - if (err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey") || - strings.Contains(err.Error(), "Not Found") { - return os.ErrNotExist - } - // Other 404 errors like NoSuchVersion and - // NoSuchBucket are different problems which should - // get called out downstream, so we don't convert them - // to os.ErrNotExist. - } - return err +// DeviceID returns a globally unique ID for the storage bucket. +func (v *s3Volume) DeviceID() string { + return "s3://" + v.Endpoint + "/" + v.Bucket } // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime // and deletes them from the volume. -func (v *S3Volume) EmptyTrash() { - if v.cluster.Collections.BlobDeleteConcurrency < 1 { - return - } - +func (v *s3Volume) EmptyTrash() { var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64 // Define "ready to delete" as "...when EmptyTrash started". 
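+	// (Because the cutoff is sampled once, before listing begins, a
+	// block that lands in the trash while this sweep is running can
+	// never satisfy startT.Sub(trashT) >= BlobTrashLifetime below,
+	// no matter how long the listing takes.)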
startT := time.Now() - emptyOneKey := func(trash *s3.Key) { - key := trash.Key[6:] - loc, isBlk := v.isKeepBlock(key) - if !isBlk { + emptyOneKey := func(trash *s3.Object) { + key := strings.TrimPrefix(*trash.Key, "trash/") + loc, isblk := v.isKeepBlock(key) + if !isblk { return } - atomic.AddInt64(&bytesInTrash, trash.Size) + atomic.AddInt64(&bytesInTrash, *trash.Size) atomic.AddInt64(&blocksInTrash, 1) - trashT, err := time.Parse(time.RFC3339, trash.LastModified) - if err != nil { - v.logger.Warnf("EmptyTrash: %q: parse %q: %s", trash.Key, trash.LastModified, err) - return - } - recent, err := v.bucket.Head("recent/"+key, nil) + trashT := *trash.LastModified + recent, err := v.head("recent/" + key) if err != nil && os.IsNotExist(v.translateError(err)) { - v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", trash.Key, "recent/"+loc, err) - err = v.Untrash(loc) + v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err) + err = v.BlockUntrash(loc) if err != nil { v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc) } @@ -848,14 +269,9 @@ func (v *S3Volume) EmptyTrash() { v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+key) return } - recentT, err := v.lastModified(recent) - if err != nil { - v.logger.WithError(err).Warnf("EmptyTrash: %q: error parsing %q", "recent/"+key, recent.Header.Get("Last-Modified")) - return - } - if trashT.Sub(recentT) < v.cluster.Collections.BlobSigningTTL.Duration() { - if age := startT.Sub(recentT); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) { - // recent/loc is too old to protect + if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() { + if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) { + // recent/key is too old to protect // loc from being Trashed again during // the raceWindow that starts if we // delete trash/X now. @@ -865,10 +281,10 @@ func (v *S3Volume) EmptyTrash() { // necessary to avoid starvation. 
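+				// (Concretely: with BlobSigningTTL = 336h and a
+				// RaceWindow of 24h, a recent/X marker already more
+				// than 312h old at sweep start cannot be trusted to
+				// outlive another race window, so the sweep
+				// refreshes it and leaves trash/X for a later pass.)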
v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc) v.fixRace(key) - v.Touch(loc) + v.BlockTouch(loc) return } - _, err := v.bucket.Head(key, nil) + _, err := v.head(key) if os.IsNotExist(err) { v.logger.Infof("EmptyTrash: detected recent race for %q, calling fixRace", loc) v.fixRace(key) @@ -881,17 +297,17 @@ func (v *S3Volume) EmptyTrash() { if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() { return } - err = v.bucket.Del(trash.Key) + err = v.bucket.Del(*trash.Key) if err != nil { - v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", trash.Key) + v.logger.WithError(err).Errorf("EmptyTrash: error deleting %q", *trash.Key) return } - atomic.AddInt64(&bytesDeleted, trash.Size) + atomic.AddInt64(&bytesDeleted, *trash.Size) atomic.AddInt64(&blocksDeleted, 1) - _, err = v.bucket.Head(key, nil) + _, err = v.head(*trash.Key) if err == nil { - v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", key, key) + v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc) return } if !os.IsNotExist(v.translateError(err)) { @@ -905,7 +321,7 @@ func (v *S3Volume) EmptyTrash() { } var wg sync.WaitGroup - todo := make(chan *s3.Key, v.cluster.Collections.BlobDeleteConcurrency) + todo := make(chan *s3.Object, v.cluster.Collections.BlobDeleteConcurrency) for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ { wg.Add(1) go func() { @@ -916,9 +332,9 @@ func (v *S3Volume) EmptyTrash() { }() } - trashL := s3Lister{ + trashL := s3awsLister{ Logger: v.logger, - Bucket: v.bucket.Bucket(), + Bucket: v.bucket, Prefix: "trash/", PageSize: v.IndexPageSize, Stats: &v.bucket.stats, @@ -932,23 +348,193 @@ func (v *S3Volume) EmptyTrash() { if err := trashL.Error(); err != nil { v.logger.WithError(err).Error("EmptyTrash: lister failed") } - v.logger.Infof("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted) + v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted) +} + +// fixRace(X) is called when "recent/X" exists but "X" doesn't +// exist. If the timestamps on "recent/X" and "trash/X" indicate there +// was a race between Put and Trash, fixRace recovers from the race by +// Untrashing the block. +func (v *s3Volume) fixRace(key string) bool { + trash, err := v.head("trash/" + key) + if err != nil { + if !os.IsNotExist(v.translateError(err)) { + v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "trash/"+key) + } + return false + } + + recent, err := v.head("recent/" + key) + if err != nil { + v.logger.WithError(err).Errorf("fixRace: HEAD %q failed", "recent/"+key) + return false + } + + recentTime := *recent.LastModified + trashTime := *trash.LastModified + ageWhenTrashed := trashTime.Sub(recentTime) + if ageWhenTrashed >= v.cluster.Collections.BlobSigningTTL.Duration() { + // No evidence of a race: block hasn't been written + // since it became eligible for Trash. No fix needed. 
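+		// (For example, with BlobSigningTTL = 336h: if recent/X was
+		// last written at time T and trash/X was created at T+400h,
+		// ageWhenTrashed is 400h >= 336h, meaning the block went a
+		// full signing TTL without being written before it was
+		// trashed, so the trash was legitimate.)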
+ return false + } + + v.logger.Infof("fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", key, trashTime, recentTime, ageWhenTrashed, v.cluster.Collections.BlobSigningTTL) + v.logger.Infof("fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+key, key) + err = v.safeCopy(key, "trash/"+key) + if err != nil { + v.logger.WithError(err).Error("fixRace: copy failed") + return false + } + return true +} + +func (v *s3Volume) head(key string) (result *s3.HeadObjectOutput, err error) { + input := &s3.HeadObjectInput{ + Bucket: aws.String(v.bucket.bucket), + Key: aws.String(key), + } + + req := v.bucket.svc.HeadObjectRequest(input) + res, err := req.Send(context.TODO()) + + v.bucket.stats.TickOps("head") + v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.HeadOps) + v.bucket.stats.TickErr(err) + + if err != nil { + return nil, v.translateError(err) + } + result = res.HeadObjectOutput + return +} + +// BlockRead reads a Keep block that has been stored as a block blob +// in the S3 bucket. +func (v *s3Volume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error { + key := v.key(hash) + err := v.readWorker(ctx, key, w) + if err != nil { + err = v.translateError(err) + if !os.IsNotExist(err) { + return err + } + + _, err = v.head("recent/" + key) + err = v.translateError(err) + if err != nil { + // If we can't read recent/X, there's no point in + // trying fixRace. Give up. + return err + } + if !v.fixRace(key) { + err = os.ErrNotExist + return err + } + + err = v.readWorker(ctx, key, w) + if err != nil { + v.logger.Warnf("reading %s after successful fixRace: %s", hash, err) + err = v.translateError(err) + return err + } + } + return nil +} + +func (v *s3Volume) readWorker(ctx context.Context, key string, dst io.WriterAt) error { + downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) { + u.PartSize = s3downloaderPartSize + u.Concurrency = s3downloaderReadConcurrency + }) + count, err := downloader.DownloadWithContext(ctx, dst, &s3.GetObjectInput{ + Bucket: aws.String(v.bucket.bucket), + Key: aws.String(key), + }) + v.bucket.stats.TickOps("get") + v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps) + v.bucket.stats.TickErr(err) + v.bucket.stats.TickInBytes(uint64(count)) + return v.translateError(err) +} + +func (v *s3Volume) writeObject(ctx context.Context, key string, r io.Reader) error { + if r == nil { + // r == nil leads to a memory violation in func readFillBuf in + // aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go + r = bytes.NewReader(nil) + } + + uploadInput := s3manager.UploadInput{ + Bucket: aws.String(v.bucket.bucket), + Key: aws.String(key), + Body: r, + } + + if loc, ok := v.isKeepBlock(key); ok { + var contentMD5 string + md5, err := hex.DecodeString(loc) + if err != nil { + return v.translateError(err) + } + contentMD5 = base64.StdEncoding.EncodeToString(md5) + uploadInput.ContentMD5 = &contentMD5 + } + + // Experimentation indicated that using concurrency 5 yields the best + // throughput, better than higher concurrency (10 or 13) by ~5%. + // Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024) + // is detrimental to throughput (minus ~15%). 
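+	// (At s3uploaderPartSize = 5 MiB, a full 64 MiB Keep block is
+	// split into 13 parts, so write concurrency 5 bounds the
+	// uploader's in-flight part buffers to roughly 25 MiB per
+	// block write.)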
+ uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) { + u.PartSize = s3uploaderPartSize + u.Concurrency = s3uploaderWriteConcurrency + }) + + // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256: + // the aws-sdk-go v2 SDK uses a ReadSeeker to avoid having to copy the + // block, so there is no extra memory use to be concerned about. See + // makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable + // calculating the Sha-256 because we don't need it; we already use md5sum + // hashes that match the name of the block. + _, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) { + r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD") + })) + + v.bucket.stats.TickOps("put") + v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps) + v.bucket.stats.TickErr(err) + + return v.translateError(err) +} + +// Put writes a block. +func (v *s3Volume) BlockWrite(ctx context.Context, hash string, data []byte) error { + // Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3 + // sdk to avoid memory allocation there. See #17339 for more information. + rdr := bytes.NewReader(data) + r := newCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes) + key := v.key(hash) + err := v.writeObject(ctx, key, r) + if err != nil { + return err + } + return v.writeObject(ctx, "recent/"+key, nil) } -type s3Lister struct { - Logger logrus.FieldLogger - Bucket *s3.Bucket - Prefix string - PageSize int - Stats *s3bucketStats - nextMarker string - buf []s3.Key - err error +type s3awsLister struct { + Logger logrus.FieldLogger + Bucket *s3Bucket + Prefix string + PageSize int + Stats *s3awsbucketStats + ContinuationToken string + buf []s3.Object + err error } // First fetches the first page and returns the first item. It returns // nil if the response is the empty set or an error occurs. -func (lister *s3Lister) First() *s3.Key { +func (lister *s3awsLister) First() *s3.Object { lister.getPage() return lister.pop() } @@ -956,41 +542,65 @@ func (lister *s3Lister) First() *s3.Key { // Next returns the next item, fetching the next page if necessary. It // returns nil if the last available item has already been fetched, or // an error occurs. -func (lister *s3Lister) Next() *s3.Key { - if len(lister.buf) == 0 && lister.nextMarker != "" { +func (lister *s3awsLister) Next() *s3.Object { + if len(lister.buf) == 0 && lister.ContinuationToken != "" { lister.getPage() } return lister.pop() } // Return the most recent error encountered by First or Next. 
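+// (First and Next return nil both at end-of-listing and after a failed
+// page fetch, so callers such as Index and EmptyTrash check Error() to
+// distinguish a complete listing from a truncated one.)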
-func (lister *s3Lister) Error() error { +func (lister *s3awsLister) Error() error { return lister.err } -func (lister *s3Lister) getPage() { +func (lister *s3awsLister) getPage() { lister.Stats.TickOps("list") lister.Stats.Tick(&lister.Stats.Ops, &lister.Stats.ListOps) - resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize) - lister.nextMarker = "" + + var input *s3.ListObjectsV2Input + if lister.ContinuationToken == "" { + input = &s3.ListObjectsV2Input{ + Bucket: aws.String(lister.Bucket.bucket), + MaxKeys: aws.Int64(int64(lister.PageSize)), + Prefix: aws.String(lister.Prefix), + } + } else { + input = &s3.ListObjectsV2Input{ + Bucket: aws.String(lister.Bucket.bucket), + MaxKeys: aws.Int64(int64(lister.PageSize)), + Prefix: aws.String(lister.Prefix), + ContinuationToken: &lister.ContinuationToken, + } + } + + req := lister.Bucket.svc.ListObjectsV2Request(input) + resp, err := req.Send(context.Background()) if err != nil { - lister.err = err + if aerr, ok := err.(awserr.Error); ok { + lister.err = aerr + } else { + lister.err = err + } return } - if resp.IsTruncated { - lister.nextMarker = resp.NextMarker + + if *resp.IsTruncated { + lister.ContinuationToken = *resp.NextContinuationToken + } else { + lister.ContinuationToken = "" } - lister.buf = make([]s3.Key, 0, len(resp.Contents)) + lister.buf = make([]s3.Object, 0, len(resp.Contents)) for _, key := range resp.Contents { - if !strings.HasPrefix(key.Key, lister.Prefix) { - lister.Logger.Warnf("s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key) + if !strings.HasPrefix(*key.Key, lister.Prefix) { + lister.Logger.Warnf("s3awsLister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, *key.Key) continue } lister.buf = append(lister.buf, key) } } -func (lister *s3Lister) pop() (k *s3.Key) { +func (lister *s3awsLister) pop() (k *s3.Object) { if len(lister.buf) > 0 { k = &lister.buf[0] lister.buf = lister.buf[1:] @@ -998,71 +608,201 @@ func (lister *s3Lister) pop() (k *s3.Key) { return } -// s3bucket wraps s3.bucket and counts I/O and API usage stats. The -// wrapped bucket can be replaced atomically with SetBucket in order -// to update credentials. -type s3bucket struct { - bucket *s3.Bucket - stats s3bucketStats - mu sync.Mutex -} +// Index writes a complete list of locators with the given prefix +// for which Get() can retrieve data. +func (v *s3Volume) Index(ctx context.Context, prefix string, writer io.Writer) error { + prefix = v.key(prefix) + // Use a merge sort to find matching sets of X and recent/X. + dataL := s3awsLister{ + Logger: v.logger, + Bucket: v.bucket, + Prefix: prefix, + PageSize: v.IndexPageSize, + Stats: &v.bucket.stats, + } + recentL := s3awsLister{ + Logger: v.logger, + Bucket: v.bucket, + Prefix: "recent/" + prefix, + PageSize: v.IndexPageSize, + Stats: &v.bucket.stats, + } + for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() { + if ctx.Err() != nil { + return ctx.Err() + } + if *data.Key >= "g" { + // Conveniently, "recent/*" and "trash/*" are + // lexically greater than all hex-encoded data + // hashes, so stopping here avoids iterating + // over all of them needlessly with dataL. + break + } + loc, isblk := v.isKeepBlock(*data.Key) + if !isblk { + continue + } + + // stamp is the list entry we should use to report the + // last-modified time for this data block: it will be + // the recent/X entry if one exists, otherwise the + // entry for the data block itself. 
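+		// (For example, if dataL yields keys "aaa..." and "bbb..."
+		// while recentL yields only "recent/bbb...", the index line
+		// for "aaa..." reports that object's own LastModified, and
+		// the line for "bbb..." reports the LastModified of
+		// "recent/bbb...".)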
+ stamp := data -func (b *s3bucket) Bucket() *s3.Bucket { - b.mu.Lock() - defer b.mu.Unlock() - return b.bucket + // Advance to the corresponding recent/X marker, if any + for recent != nil && recentL.Error() == nil { + if cmp := strings.Compare((*recent.Key)[7:], *data.Key); cmp < 0 { + recent = recentL.Next() + continue + } else if cmp == 0 { + stamp = recent + recent = recentL.Next() + break + } else { + // recent/X marker is missing: we'll + // use the timestamp on the data + // object. + break + } + } + if err := recentL.Error(); err != nil { + return err + } + // We truncate sub-second precision here. Otherwise + // timestamps will never match the RFC1123-formatted + // Last-Modified values parsed by Mtime(). + fmt.Fprintf(writer, "%s+%d %d\n", loc, *data.Size, stamp.LastModified.Unix()*1000000000) + } + return dataL.Error() } -func (b *s3bucket) SetBucket(bucket *s3.Bucket) { - b.mu.Lock() - defer b.mu.Unlock() - b.bucket = bucket +// Mtime returns the stored timestamp for the given locator. +func (v *s3Volume) Mtime(loc string) (time.Time, error) { + key := v.key(loc) + _, err := v.head(key) + if err != nil { + return s3AWSZeroTime, v.translateError(err) + } + resp, err := v.head("recent/" + key) + err = v.translateError(err) + if os.IsNotExist(err) { + // The data object X exists, but recent/X is missing. + err = v.writeObject(context.Background(), "recent/"+key, nil) + if err != nil { + v.logger.WithError(err).Errorf("error creating %q", "recent/"+key) + return s3AWSZeroTime, v.translateError(err) + } + v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+key) + resp, err = v.head("recent/" + key) + if err != nil { + v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+key) + return s3AWSZeroTime, v.translateError(err) + } + } else if err != nil { + // HEAD recent/X failed for some other reason. + return s3AWSZeroTime, err + } + return *resp.LastModified, err } -func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) { - rdr, err := b.Bucket().GetReader(path) - b.stats.TickOps("get") - b.stats.Tick(&b.stats.Ops, &b.stats.GetOps) - b.stats.TickErr(err) - return NewCountingReader(rdr, b.stats.TickInBytes), err +// InternalStats returns bucket I/O and API call counters. +func (v *s3Volume) InternalStats() interface{} { + return &v.bucket.stats } -func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) { - resp, err := b.Bucket().Head(path, headers) - b.stats.TickOps("head") - b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps) - b.stats.TickErr(err) - return resp, err +// BlockTouch sets the timestamp for the given locator to the current time. +func (v *s3Volume) BlockTouch(hash string) error { + key := v.key(hash) + _, err := v.head(key) + err = v.translateError(err) + if os.IsNotExist(err) && v.fixRace(key) { + // The data object got trashed in a race, but fixRace + // rescued it. + } else if err != nil { + return err + } + err = v.writeObject(context.Background(), "recent/"+key, nil) + return v.translateError(err) } -func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error { - if length == 0 { - // goamz will only send Content-Length: 0 when reader - // is nil due to net.http.Request.ContentLength - // behavior. Otherwise, Content-Length header is - // omitted which will cause some S3 services - // (including AWS and Ceph RadosGW) to fail to create - // empty objects. 
- r = nil - } else { - r = NewCountingReader(r, b.stats.TickOutBytes) +// checkRaceWindow returns a non-nil error if trash/key is, or might +// be, in the race window (i.e., it's not safe to trash key). +func (v *s3Volume) checkRaceWindow(key string) error { + resp, err := v.head("trash/" + key) + err = v.translateError(err) + if os.IsNotExist(err) { + // OK, trash/X doesn't exist so we're not in the race + // window + return nil + } else if err != nil { + // Error looking up trash/X. We don't know whether + // we're in the race window + return err } - err := b.Bucket().PutReader(path, r, length, contType, perm, options) - b.stats.TickOps("put") - b.stats.Tick(&b.stats.Ops, &b.stats.PutOps) - b.stats.TickErr(err) - return err + t := resp.LastModified + safeWindow := t.Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Sub(time.Now().Add(time.Duration(v.RaceWindow))) + if safeWindow <= 0 { + // We can't count on "touch trash/X" to prolong + // trash/X's lifetime. The new timestamp might not + // become visible until now+raceWindow, and EmptyTrash + // is allowed to delete trash/X before then. + return fmt.Errorf("%s: same block is already in trash, and safe window ended %s ago", key, -safeWindow) + } + // trash/X exists, but it won't be eligible for deletion until + // after now+raceWindow, so it's safe to overwrite it. + return nil } -func (b *s3bucket) Del(path string) error { - err := b.Bucket().Del(path) +func (b *s3Bucket) Del(path string) error { + input := &s3.DeleteObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(path), + } + req := b.svc.DeleteObjectRequest(input) + _, err := req.Send(context.Background()) b.stats.TickOps("delete") b.stats.Tick(&b.stats.Ops, &b.stats.DelOps) b.stats.TickErr(err) return err } -type s3bucketStats struct { +// Trash a Keep block. +func (v *s3Volume) BlockTrash(loc string) error { + if t, err := v.Mtime(loc); err != nil { + return err + } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() { + return nil + } + key := v.key(loc) + if v.cluster.Collections.BlobTrashLifetime == 0 { + if !v.UnsafeDelete { + return errS3TrashDisabled + } + return v.translateError(v.bucket.Del(key)) + } + err := v.checkRaceWindow(key) + if err != nil { + return err + } + err = v.safeCopy("trash/"+key, key) + if err != nil { + return err + } + return v.translateError(v.bucket.Del(key)) +} + +// BlockUntrash moves block from trash back into store +func (v *s3Volume) BlockUntrash(hash string) error { + key := v.key(hash) + err := v.safeCopy(key, "trash/"+key) + if err != nil { + return err + } + err = v.writeObject(context.Background(), "recent/"+key, nil) + return v.translateError(err) +} + +type s3awsbucketStats struct { statsTicker Ops uint64 GetOps uint64 @@ -1072,13 +812,18 @@ type s3bucketStats struct { ListOps uint64 } -func (s *s3bucketStats) TickErr(err error) { +func (s *s3awsbucketStats) TickErr(err error) { if err == nil { return } errType := fmt.Sprintf("%T", err) - if err, ok := err.(*s3.Error); ok { - errType = errType + fmt.Sprintf(" %d %s", err.StatusCode, err.Code) + if aerr, ok := err.(awserr.Error); ok { + if reqErr, ok := err.(awserr.RequestFailure); ok { + // A service error occurred + errType = errType + fmt.Sprintf(" %d %s", reqErr.StatusCode(), aerr.Code()) + } else { + errType = errType + fmt.Sprintf(" 000 %s", aerr.Code()) + } } s.statsTicker.TickErr(err, errType) }
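
For readers less familiar with the aws-sdk-go-v2 (v0.x) surface this commit targets, the following standalone sketch exercises the same building blocks that check(), writeObject() and BlockWrite() combine above: defaults.Config() plus a custom EndpointResolver for non-AWS S3 services, a ChainProvider that falls back from static keys to EC2 instance-role credentials, and an s3manager uploader sending an unsigned payload. It is illustrative only; the endpoint URL, region, bucket, object key, and credentials are hypothetical placeholders, and the part size and concurrency mirror the s3uploaderPartSize and s3uploaderWriteConcurrency constants above.

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/defaults"
	"github.com/aws/aws-sdk-go-v2/aws/ec2metadata"
	"github.com/aws/aws-sdk-go-v2/aws/ec2rolecreds"
	"github.com/aws/aws-sdk-go-v2/aws/endpoints"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
)

func main() {
	cfg := defaults.Config()
	cfg.Region = "us-east-1" // placeholder; any region the service accepts

	// Send "s3" traffic to a non-AWS endpoint (e.g. Ceph RadosGW) and
	// let the SDK's default resolver handle everything else, the same
	// shape as the resolver built in (*s3Volume)check.
	def := endpoints.NewDefaultResolver()
	cfg.EndpointResolver = aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
		if service == "s3" {
			return aws.Endpoint{URL: "https://s3.example.com", SigningRegion: region}, nil
		}
		return def.ResolveEndpoint(service, region)
	})

	// Try static keys first, then fall back to EC2 instance-role
	// credentials, as the driver's credentials chain does.
	cfg.Credentials = aws.NewChainProvider([]aws.CredentialsProvider{
		aws.NewStaticCredentialsProvider("PLACEHOLDER_KEY", "PLACEHOLDER_SECRET", ""),
		ec2rolecreds.New(ec2metadata.New(cfg)),
	})

	uploader := s3manager.NewUploaderWithClient(s3.New(cfg), func(u *s3manager.Uploader) {
		u.PartSize = 5 * 1024 * 1024 // s3uploaderPartSize
		u.Concurrency = 5            // s3uploaderWriteConcurrency
	})
	_, err := uploader.UploadWithContext(context.Background(), &s3manager.UploadInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("hello.txt"),
		Body:   bytes.NewReader([]byte("hello, world\n")),
	}, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
		// Skip SHA-256 payload signing, as writeObject does; Keep
		// blocks are already content-addressed by MD5.
		r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
	}))
	if err != nil {
		log.Fatal(err)
	}
}

On top of this same upload path, BlockWrite additionally sets ContentMD5 from the block's hash and writes the companion recent/ marker.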