16 "github.com/AdRoll/goamz/aws"
17 "github.com/AdRoll/goamz/s3"
	// ErrS3TrashDisabled is returned by Trash if that operation
	// is impossible with the current config.
	ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because -trash-lifetime=0 and -s3-unsafe-delete=false")

	s3AccessKeyFile string
	s3SecretKeyFile string

	s3RaceWindow time.Duration

	maxClockSkew  = 600 * time.Second
	nearlyRFC1123 = "Mon, 2 Jan 2006 15:04:05 GMT"
type s3VolumeAdder struct {

func (s *s3VolumeAdder) Set(bucketName string) error {
	if bucketName == "" {
		return fmt.Errorf("no bucket name given")
	}
	if s3AccessKeyFile == "" || s3SecretKeyFile == "" {
		return fmt.Errorf("-s3-access-key-file and -s3-secret-key-file arguments must be given before -s3-bucket-volume")
	}
	region, ok := aws.Regions[s3RegionName]
	if s3Endpoint == "" {
		if !ok {
			return fmt.Errorf("unrecognized region %+q; try specifying -s3-endpoint instead", s3RegionName)
		}
	} else {
		if ok {
			return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
				"specify empty endpoint (\"-s3-endpoint=\") or use a different region name", s3RegionName, s3Endpoint)
		}
		region = aws.Region{
			Name:       s3RegionName,
			S3Endpoint: s3Endpoint,
		}
	}
	var err error
	var auth aws.Auth
	auth.AccessKey, err = readKeyFromFile(s3AccessKeyFile)

	auth.SecretKey, err = readKeyFromFile(s3SecretKeyFile)

		log.Print("Notice: -serialize is not supported by s3-bucket volumes.")

	v := NewS3Volume(auth, region, bucketName, s3RaceWindow, flagReadonly, s3Replication)
	if err := v.Check(); err != nil {
		return err
	}
	*s.volumeSet = append(*s.volumeSet, v)
	return nil
}
func s3regions() (okList []string) {
	for r := range aws.Regions {
		okList = append(okList, r)
	}
	return
}
	flag.Var(&s3VolumeAdder{&volumes},
		"s3-bucket-volume",
		"Use the given bucket as a storage volume. Can be given multiple times.")

		fmt.Sprintf("AWS region used for subsequent -s3-bucket-volume arguments. Allowed values are %+q.", s3regions()))

		"Endpoint URL used for subsequent -s3-bucket-volume arguments. If blank, use the AWS endpoint corresponding to the -s3-region argument. For Google Storage, use \"https://storage.googleapis.com\".")

		"s3-access-key-file",
		"",
		"File containing the access key used for subsequent -s3-bucket-volume arguments.")

		"s3-secret-key-file",
		"",
		"File containing the secret key used for subsequent -s3-bucket-volume arguments.")

		"Maximum eventual consistency latency for subsequent -s3-bucket-volume arguments.")

		"Replication level reported to clients for subsequent -s3-bucket-volume arguments.")

		"EXPERIMENTAL. Enable deletion (garbage collection), even though there are known race conditions that can cause data loss.")
// S3Volume implements Volume using an S3 bucket.
type S3Volume struct {
	*s3.Bucket
	raceWindow    time.Duration
	readonly      bool
	replication   int
	indexPageSize int
}

// NewS3Volume returns a new S3Volume using the given auth, region,
// and bucket name. The replication argument specifies the replication
// level to report when writing data.
func NewS3Volume(auth aws.Auth, region aws.Region, bucket string, raceWindow time.Duration, readonly bool, replication int) *S3Volume {
	return &S3Volume{
		Bucket: &s3.Bucket{
			S3:   s3.New(auth, region),
			Name: bucket,
		},
		raceWindow:    raceWindow,
		readonly:      readonly,
		replication:   replication,
		indexPageSize: 1000,
	}
}
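// A minimal sketch of constructing and checking a volume directly, outside
// the flag machinery above; the credentials, region, and numbers are
// illustrative only:
//
//	auth := aws.Auth{AccessKey: "AKIA...", SecretKey: "..."}
//	v := NewS3Volume(auth, aws.USEast, "example-keep-bucket",
//		24*time.Hour, false, 2)
//	if err := v.Check(); err != nil {
//		log.Fatalf("S3 volume not usable: %s", err)
//	}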
// Check returns an error if the volume is inaccessible (e.g., config
// error).
func (v *S3Volume) Check() error {
// getReader wraps (Bucket)GetReader.
//
// In situations where (Bucket)GetReader would fail because the block
// disappeared in a Trash race, getReader calls fixRace to recover the
// data, and tries again.
func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
	rdr, err = v.Bucket.GetReader(loc)
	err = v.translateError(err)
	if err == nil || !os.IsNotExist(err) {
		return
	}
	_, err = v.Bucket.Head("recent/"+loc, nil)
	err = v.translateError(err)
	if err != nil {
		// If we can't read recent/X, there's no point in
		// trying fixRace. Give up.
		return
	}
	if !v.fixRace(loc) {
		err = os.ErrNotExist
		return
	}
	rdr, err = v.Bucket.GetReader(loc)
	if err != nil {
		log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
		err = v.translateError(err)
	}
	return
}
// Get a block: copy the block data into buf, and return the number of
// bytes copied.
func (v *S3Volume) Get(loc string, buf []byte) (int, error) {
	rdr, err := v.getReader(loc)
	if err != nil {
		return 0, err
	}
	defer rdr.Close()
	n, err := io.ReadFull(rdr, buf)
	switch err {
	case nil, io.EOF, io.ErrUnexpectedEOF:
		return n, nil
	default:
		return 0, v.translateError(err)
	}
}
// Compare the given data with the stored data.
func (v *S3Volume) Compare(loc string, expect []byte) error {
	rdr, err := v.getReader(loc)
	if err != nil {
		return err
	}
	defer rdr.Close()
	return v.translateError(compareReaderWithBuf(rdr, expect, loc[:32]))
}
// Put writes a block.
func (v *S3Volume) Put(loc string, block []byte) error {
	if v.readonly {
		return MethodDisabledError
	}
	var opts s3.Options
	if len(block) > 0 {
		md5, err := hex.DecodeString(loc)
		if err != nil {
			return err
		}
		opts.ContentMD5 = base64.StdEncoding.EncodeToString(md5)
	}
	err := v.Bucket.Put(loc, block, "application/octet-stream", s3ACL, opts)
	if err != nil {
		return v.translateError(err)
	}
	err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
	return v.translateError(err)
}
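// For a block with locator "acbd18db4cc2f85cedef654fccc4a4d8" (md5 of "foo"),
// a successful Put leaves two objects in the bucket: the data under
// "acbd18db4cc2f85cedef654fccc4a4d8" and an empty timestamp marker under
// "recent/acbd18db4cc2f85cedef654fccc4a4d8". Trash later adds a third,
// "trash/acbd18db4cc2f85cedef654fccc4a4d8".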
// Touch sets the timestamp for the given locator to the current time.
func (v *S3Volume) Touch(loc string) error {
	if v.readonly {
		return MethodDisabledError
	}
	_, err := v.Bucket.Head(loc, nil)
	err = v.translateError(err)
	if os.IsNotExist(err) && v.fixRace(loc) {
		// The data object got trashed in a race, but fixRace
		// rescued it.
	} else if err != nil {
		return err
	}
	err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
	return v.translateError(err)
}
// Mtime returns the stored timestamp for the given locator.
func (v *S3Volume) Mtime(loc string) (time.Time, error) {
	_, err := v.Bucket.Head(loc, nil)
	if err != nil {
		return zeroTime, v.translateError(err)
	}
	resp, err := v.Bucket.Head("recent/"+loc, nil)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// The data object X exists, but recent/X is missing.
		err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
		if err != nil {
			log.Printf("error: creating %q: %s", "recent/"+loc, err)
			return zeroTime, v.translateError(err)
		}
		log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
		resp, err = v.Bucket.Head("recent/"+loc, nil)
		if err != nil {
			log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
			return zeroTime, v.translateError(err)
		}
	} else if err != nil {
		// HEAD recent/X failed for some other reason.
		return zeroTime, err
	}
	return v.lastModified(resp)
}
// IndexTo writes a complete list of locators with the given prefix
// for which Get() can retrieve data.
func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
	// Use a merge sort to find matching sets of X and recent/X.
	dataL := s3Lister{
		Bucket:   v.Bucket,
		Prefix:   prefix,
		PageSize: v.indexPageSize,
	}
	recentL := s3Lister{
		Bucket:   v.Bucket,
		Prefix:   "recent/" + prefix,
		PageSize: v.indexPageSize,
	}
	for data, recent := dataL.First(), recentL.First(); data != nil; data = dataL.Next() {
		if data.Key >= "g" {
			// Conveniently, "recent/*" and "trash/*" are
			// lexically greater than all hex-encoded data
			// hashes, so stopping here avoids iterating
			// over all of them needlessly with dataL.
			break
		}
		if !v.isKeepBlock(data.Key) {
			continue
		}

		// stamp is the list entry we should use to report the
		// last-modified time for this data block: it will be
		// the recent/X entry if one exists, otherwise the
		// entry for the data block itself.
		stamp := data

		// Advance to the corresponding recent/X marker, if any.
		for recent != nil {
			if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
				recent = recentL.Next()
				continue
			} else if cmp == 0 {
				stamp = recent
				recent = recentL.Next()
				break
			} else {
				// recent/X marker is missing: we'll
				// use the timestamp on the data
				// object itself.
				break
			}
		}
		t, err := time.Parse(time.RFC3339, stamp.LastModified)
		if err != nil {
			return err
		}
		fmt.Fprintf(writer, "%s+%d %d\n", data.Key, data.Size, t.UnixNano())
	}
	return nil
}
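// Each index line written above has the form "<md5>+<size> <mtime_ns>", for
// example (timestamp illustrative):
//
//	acbd18db4cc2f85cedef654fccc4a4d8+3 1502134472114722001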
// Trash a Keep block.
func (v *S3Volume) Trash(loc string) error {
	if v.readonly {
		return MethodDisabledError
	}
	if t, err := v.Mtime(loc); err != nil {
		return err
	} else if time.Since(t) < blobSignatureTTL {
		return nil
	}
	if trashLifetime == 0 {
		if !s3UnsafeDelete {
			return ErrS3TrashDisabled
		}
		return v.Bucket.Del(loc)
	}
	err := v.checkRaceWindow(loc)
	if err != nil {
		return err
	}
	err = v.safeCopy("trash/"+loc, loc)
	if err != nil {
		return err
	}
	return v.translateError(v.Bucket.Del(loc))
}
// checkRaceWindow returns a non-nil error if trash/loc is, or might
// be, in the race window (i.e., it's not safe to trash loc).
func (v *S3Volume) checkRaceWindow(loc string) error {
	resp, err := v.Bucket.Head("trash/"+loc, nil)
	err = v.translateError(err)
	if os.IsNotExist(err) {
		// OK, trash/X doesn't exist so we're not in the race
		// window.
		return nil
	} else if err != nil {
		// Error looking up trash/X. We don't know whether
		// we're in the race window.
		return err
	}
	t, err := v.lastModified(resp)
	if err != nil {
		// Can't parse timestamp.
		return err
	}
	safeWindow := t.Add(trashLifetime).Sub(time.Now().Add(v.raceWindow))
	if safeWindow <= 0 {
		// We can't count on "touch trash/X" to prolong
		// trash/X's lifetime. The new timestamp might not
		// become visible until now+raceWindow, and EmptyTrash
		// is allowed to delete trash/X before then.
		return fmt.Errorf("same block is already in trash, and safe window ended %s ago", -safeWindow)
	}
	// trash/X exists, but it won't be eligible for deletion until
	// after now+raceWindow, so it's safe to overwrite it.
	return nil
}
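// A worked example of the safeWindow arithmetic above (numbers illustrative,
// not defaults): with trashLifetime = 72h and raceWindow = 24h, a trash/X
// last modified 50 hours ago gives
//
//	safeWindow = (t + 72h) - (now + 24h) = 72h - 24h - 50h = -2h
//
// which is not positive, so Trash refuses to proceed: EmptyTrash may already
// be entitled to delete trash/X before a new timestamp would reliably become
// visible.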
// safeCopy calls PutCopy, and checks the response to make sure the
// copy succeeded and updated the timestamp on the destination object
// (PutCopy returns 200 OK if the request was received, even if the
// copy failed).
func (v *S3Volume) safeCopy(dst, src string) error {
	resp, err := v.Bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
		ContentType:       "application/octet-stream",
		MetadataDirective: "REPLACE",
	}, v.Bucket.Name+"/"+src)
	err = v.translateError(err)
	if err != nil {
		return err
	}
	if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
		return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
	} else if time.Since(t) > maxClockSkew {
		return fmt.Errorf("PutCopy succeeded but returned an old timestamp: %q: %s", resp.LastModified, t)
	}
	return nil
}
// lastModified reads the Last-Modified header from resp and parses it as
// RFC1123, or, if it isn't valid RFC1123, as Amazon's nearly-RFC1123 variant.
func (v *S3Volume) lastModified(resp *http.Response) (t time.Time, err error) {
	s := resp.Header.Get("Last-Modified")
	t, err = time.Parse(time.RFC1123, s)
	if err != nil && s != "" {
		// AWS example is "Sun, 1 Jan 2006 12:00:00 GMT",
		// which isn't quite "Sun, 01 Jan 2006 12:00:00 GMT"
		// as required by HTTP spec. If it's not a valid HTTP
		// header value, it's probably AWS (or s3test) giving
		// us a nearly-RFC1123 timestamp.
		t, err = time.Parse(nearlyRFC1123, s)
	}
	return
}
// Untrash moves a block from the trash back into the store.
func (v *S3Volume) Untrash(loc string) error {
	err := v.safeCopy(loc, "trash/"+loc)
	if err != nil {
		return err
	}
	err = v.Bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
	return v.translateError(err)
}
// Status returns a *VolumeStatus representing the current in-use
// storage capacity and a fake available capacity that doesn't make
// the volume seem full or nearly-full.
func (v *S3Volume) Status() *VolumeStatus {
	return &VolumeStatus{
		DeviceNum: 1,
		BytesFree: BlockSize * 1000,
		BytesUsed: 1,
	}
}
// String implements fmt.Stringer.
func (v *S3Volume) String() string {
	return fmt.Sprintf("s3-bucket:%+q", v.Bucket.Name)
}
// Writable returns false if all future Put, Mtime, and Delete calls
// are expected to fail.
func (v *S3Volume) Writable() bool {
	return !v.readonly
}
// Replication returns the storage redundancy of the underlying
// device. Configured via command line flag.
func (v *S3Volume) Replication() int {
	return v.replication
}
var s3KeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)

func (v *S3Volume) isKeepBlock(s string) bool {
	return s3KeepBlockRegexp.MatchString(s)
}
// fixRace(X) is called when "recent/X" exists but "X" doesn't
// exist. If the timestamps on "recent/"+loc and "trash/"+loc indicate
// there was a race between Put and Trash, fixRace recovers from the
// race by Untrashing the block.
func (v *S3Volume) fixRace(loc string) bool {
	trash, err := v.Bucket.Head("trash/"+loc, nil)
	if err != nil {
		if !os.IsNotExist(v.translateError(err)) {
			log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
		}
		return false
	}
	trashTime, err := v.lastModified(trash)
	if err != nil {
		log.Printf("error: fixRace: parse %q: %s", trash.Header.Get("Last-Modified"), err)
		return false
	}

	recent, err := v.Bucket.Head("recent/"+loc, nil)
	if err != nil {
		log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
		return false
	}
	recentTime, err := v.lastModified(recent)
	if err != nil {
		log.Printf("error: fixRace: parse %q: %s", recent.Header.Get("Last-Modified"), err)
		return false
	}

	ageWhenTrashed := trashTime.Sub(recentTime)
	if ageWhenTrashed >= blobSignatureTTL {
		// No evidence of a race: block hasn't been written
		// since it became eligible for Trash. No fix needed.
		return false
	}

	log.Printf("notice: fixRace: %q: trashed at %s but touched at %s (age when trashed = %s < %s)", loc, trashTime, recentTime, ageWhenTrashed, blobSignatureTTL)
	log.Printf("notice: fixRace: copying %q to %q to recover from race between Put/Touch and Trash", "recent/"+loc, loc)
	err = v.safeCopy(loc, "trash/"+loc)
	if err != nil {
		log.Printf("error: fixRace: %s", err)
		return false
	}
	return true
}
func (v *S3Volume) translateError(err error) error {
	switch err := err.(type) {
	case *s3.Error:
		if (err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey") ||
			strings.Contains(err.Error(), "Not Found") {
			return os.ErrNotExist
		}
		// Other 404 errors like NoSuchVersion and
		// NoSuchBucket are different problems which should
		// get called out downstream, so we don't convert them
		// to os.ErrNotExist.
	}
	return err
}
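// Callers in this file run bucket errors through translateError before
// testing them with os.IsNotExist, as in this hypothetical helper (an
// illustrative sketch, not part of the original code):
//
//	func (v *S3Volume) exists(loc string) (bool, error) {
//		_, err := v.Bucket.Head(loc, nil)
//		err = v.translateError(err)
//		if os.IsNotExist(err) {
//			return false, nil
//		}
//		return err == nil, err
//	}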
// EmptyTrash looks for trashed blocks that exceeded trashLifetime
// and deletes them from the volume.
func (v *S3Volume) EmptyTrash() {
	var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64

	// Use a merge sort to find matching sets of trash/X and recent/X.
	trashL := s3Lister{
		Bucket:   v.Bucket,
		Prefix:   "trash/",
		PageSize: v.indexPageSize,
	}
	// Define "ready to delete" as "...when EmptyTrash started".
	startT := time.Now()
	for trash := trashL.First(); trash != nil; trash = trashL.Next() {
		loc := trash.Key[6:]
		if !v.isKeepBlock(loc) {
			continue
		}
		bytesInTrash += trash.Size
		blocksInTrash++

		trashT, err := time.Parse(time.RFC3339, trash.LastModified)
		if err != nil {
			log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
			continue
		}
		recent, err := v.Bucket.Head("recent/"+loc, nil)
		if err != nil && os.IsNotExist(v.translateError(err)) {
			log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
			err = v.Untrash(loc)
			if err != nil {
				log.Printf("error: %s: EmptyTrash: Untrash(%q): %s", v, loc, err)
			}
			continue
		} else if err != nil {
			log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
			continue
		}
		recentT, err := v.lastModified(recent)
		if err != nil {
			log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, "recent/"+loc, recent.Header.Get("Last-Modified"), err)
			continue
		}
		if trashT.Sub(recentT) < blobSignatureTTL {
			if age := startT.Sub(recentT); age >= blobSignatureTTL-v.raceWindow {
				// recent/loc is too old to protect
				// loc from being Trashed again during
				// the raceWindow that starts if we
				// delete trash/X now.
				//
				// Note this means (trashCheckInterval
				// < blobSignatureTTL - raceWindow) is
				// necessary to avoid starvation.
				log.Printf("notice: %s: EmptyTrash: detected old race for %q, calling fixRace + Touch", v, loc)
				v.fixRace(loc)
				v.Touch(loc)
				continue
			} else if _, err := v.Bucket.Head(loc, nil); os.IsNotExist(v.translateError(err)) {
				log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
				v.fixRace(loc)
				continue
			} else if err != nil {
				log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, loc, err)
				continue
			}
		}
		if startT.Sub(trashT) < trashLifetime {
			continue
		}
		err = v.Bucket.Del(trash.Key)
		if err != nil {
			log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
			continue
		}
		bytesDeleted += trash.Size
		blocksDeleted++

		_, err = v.Bucket.Head(loc, nil)
		if os.IsNotExist(v.translateError(err)) {
			err = v.Bucket.Del("recent/" + loc)
			if err != nil {
				log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
			}
		} else if err != nil {
			log.Printf("warning: %s: EmptyTrash: HEAD %q: %s", v, "recent/"+loc, err)
		}
	}
	if err := trashL.Error(); err != nil {
		log.Printf("error: %s: EmptyTrash: lister: %s", v, err)
	}
	log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
type s3Lister struct {

// First fetches the first page and returns the first item. It returns
// nil if the response is the empty set or an error occurs.
func (lister *s3Lister) First() *s3.Key {
	lister.getPage()
	return lister.pop()
}

// Next returns the next item, fetching the next page if necessary. It
// returns nil if the last available item has already been fetched, or
// an error occurs.
func (lister *s3Lister) Next() *s3.Key {
	if len(lister.buf) == 0 && lister.nextMarker != "" {
		lister.getPage()
	}
	return lister.pop()
}

// Error returns the most recent error encountered by First or Next.
func (lister *s3Lister) Error() error {
	return lister.err
}
func (lister *s3Lister) getPage() {
	resp, err := lister.Bucket.List(lister.Prefix, "", lister.nextMarker, lister.PageSize)
	lister.nextMarker = ""
	if err != nil {
		lister.err = err
		return
	}
	if resp.IsTruncated {
		lister.nextMarker = resp.NextMarker
	}
	lister.buf = make([]s3.Key, 0, len(resp.Contents))
	for _, key := range resp.Contents {
		if !strings.HasPrefix(key.Key, lister.Prefix) {
			log.Printf("warning: s3Lister: S3 Bucket.List(prefix=%q) returned key %q", lister.Prefix, key.Key)
			continue
		}
		lister.buf = append(lister.buf, key)
	}
}
func (lister *s3Lister) pop() (k *s3.Key) {
	if len(lister.buf) > 0 {
		k = &lister.buf[0]
		lister.buf = lister.buf[1:]
	}
	return
}
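// A minimal sketch of how the listers above are consumed by IndexTo and
// EmptyTrash, given an *S3Volume v (prefix illustrative):
//
//	lister := s3Lister{
//		Bucket:   v.Bucket,
//		Prefix:   "trash/",
//		PageSize: v.indexPageSize,
//	}
//	for key := lister.First(); key != nil; key = lister.Next() {
//		log.Printf("found %s (%d bytes)", key.Key, key.Size)
//	}
//	if err := lister.Error(); err != nil {
//		log.Printf("lister: %s", err)
//	}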