X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/f0ea4324260fb4dc6df693d9548285bb64b3b69f..f9e3d32c92920a806d50548dbb9b6c0aab7d8c54:/services/keepstore/azure_blob_volume.go diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go index e9fda2ab76..48cb02647c 100644 --- a/services/keepstore/azure_blob_volume.go +++ b/services/keepstore/azure_blob_volume.go @@ -10,13 +10,16 @@ import ( "log" "os" "regexp" + "strconv" "strings" + "sync" "time" "github.com/curoverse/azure-sdk-for-go/storage" ) var ( + azureMaxGetBytes int azureStorageAccountName string azureStorageAccountKeyFile string azureStorageReplication int @@ -41,6 +44,10 @@ type azureVolumeAdder struct { } func (s *azureVolumeAdder) Set(containerName string) error { + if trashLifetime != 0 { + return ErrNotImplemented + } + if containerName == "" { return errors.New("no container name given") } @@ -85,6 +92,11 @@ func init() { "azure-storage-replication", 3, "Replication level to report to clients when data is stored in an Azure container.") + flag.IntVar( + &azureMaxGetBytes, + "azure-max-get-bytes", + BlockSize, + fmt.Sprintf("Maximum bytes to request in a single GET request. If smaller than %d, use multiple concurrent range requests to retrieve a block.", BlockSize)) } // An AzureBlobVolume stores and retrieves blocks in an Azure Blob @@ -122,17 +134,36 @@ func (v *AzureBlobVolume) Check() error { return nil } +// Return true if expires_at metadata attribute is found on the block +func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) { + metadata, err := v.bsClient.GetBlobMetadata(v.containerName, loc) + if err != nil { + return false, metadata, v.translateError(err) + } + if metadata["expires_at"] != "" { + return true, metadata, nil + } + return false, metadata, nil +} + // Get reads a Keep block that has been stored as a block blob in the // container. // // If the block is younger than azureWriteRaceInterval and is // unexpectedly empty, assume a PutBlob operation is in progress, and // wait for it to finish writing. 
-func (v *AzureBlobVolume) Get(loc string) ([]byte, error) { +func (v *AzureBlobVolume) Get(loc string, buf []byte) (int, error) { + trashed, _, err := v.checkTrashed(loc) + if err != nil { + return 0, err + } + if trashed { + return 0, os.ErrNotExist + } var deadline time.Time haveDeadline := false - buf, err := v.get(loc) - for err == nil && len(buf) == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" { + size, err := v.get(loc, buf) + for err == nil && size == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" { // Seeing a brand new empty block probably means we're // in a race with CreateBlob, which under the hood // (apparently) does "CreateEmpty" and "CommitData" @@ -152,35 +183,91 @@ func (v *AzureBlobVolume) Get(loc string) ([]byte, error) { } else if time.Now().After(deadline) { break } - bufs.Put(buf) time.Sleep(azureWriteRacePollTime) - buf, err = v.get(loc) + size, err = v.get(loc, buf) } if haveDeadline { - log.Printf("Race ended with len(buf)==%d", len(buf)) + log.Printf("Race ended with size==%d", size) } - return buf, err + return size, err } -func (v *AzureBlobVolume) get(loc string) ([]byte, error) { - rdr, err := v.bsClient.GetBlob(v.containerName, loc) - if err != nil { - return nil, v.translateError(err) +func (v *AzureBlobVolume) get(loc string, buf []byte) (int, error) { + expectSize := len(buf) + if azureMaxGetBytes < BlockSize { + // Unfortunately the handler doesn't tell us how long the blob + // is expected to be, so we have to ask Azure. + props, err := v.bsClient.GetBlobProperties(v.containerName, loc) + if err != nil { + return 0, v.translateError(err) + } + if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 { + return 0, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize) + } + expectSize = int(props.ContentLength) } - defer rdr.Close() - buf := bufs.Get(BlockSize) - n, err := io.ReadFull(rdr, buf) - switch err { - case nil, io.EOF, io.ErrUnexpectedEOF: - return buf[:n], nil - default: - bufs.Put(buf) - return nil, err + + if expectSize == 0 { + return 0, nil } + + // We'll update this actualSize if/when we get the last piece. + actualSize := -1 + pieces := (expectSize + azureMaxGetBytes - 1) / azureMaxGetBytes + errors := make([]error, pieces) + var wg sync.WaitGroup + wg.Add(pieces) + for p := 0; p < pieces; p++ { + go func(p int) { + defer wg.Done() + startPos := p * azureMaxGetBytes + endPos := startPos + azureMaxGetBytes + if endPos > expectSize { + endPos = expectSize + } + var rdr io.ReadCloser + var err error + if startPos == 0 && endPos == expectSize { + rdr, err = v.bsClient.GetBlob(v.containerName, loc) + } else { + rdr, err = v.bsClient.GetBlobRange(v.containerName, loc, fmt.Sprintf("%d-%d", startPos, endPos-1), nil) + } + if err != nil { + errors[p] = err + return + } + defer rdr.Close() + n, err := io.ReadFull(rdr, buf[startPos:endPos]) + if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) { + // If we don't know the actual size, + // and just tried reading 64 MiB, it's + // normal to encounter EOF. + } else if err != nil { + errors[p] = err + } + if p == pieces-1 { + actualSize = startPos + n + } + }(p) + } + wg.Wait() + for _, err := range errors { + if err != nil { + return 0, v.translateError(err) + } + } + return actualSize, nil } // Compare the given data with existing stored data. 
func (v *AzureBlobVolume) Compare(loc string, expect []byte) error { + trashed, _, err := v.checkTrashed(loc) + if err != nil { + return err + } + if trashed { + return os.ErrNotExist + } rdr, err := v.bsClient.GetBlob(v.containerName, loc) if err != nil { return v.translateError(err) @@ -189,12 +276,12 @@ func (v *AzureBlobVolume) Compare(loc string, expect []byte) error { return compareReaderWithBuf(rdr, expect, loc[:32]) } -// Put sotres a Keep block as a block blob in the container. +// Put stores a Keep block as a block blob in the container. func (v *AzureBlobVolume) Put(loc string, block []byte) error { if v.readonly { return MethodDisabledError } - return v.bsClient.CreateBlockBlobFromReader(v.containerName, loc, uint64(len(block)), bytes.NewReader(block)) + return v.bsClient.CreateBlockBlobFromReader(v.containerName, loc, uint64(len(block)), bytes.NewReader(block), nil) } // Touch updates the last-modified property of a block blob. @@ -202,13 +289,28 @@ func (v *AzureBlobVolume) Touch(loc string) error { if v.readonly { return MethodDisabledError } - return v.bsClient.SetBlobMetadata(v.containerName, loc, map[string]string{ - "touch": fmt.Sprintf("%d", time.Now()), - }) + trashed, metadata, err := v.checkTrashed(loc) + if err != nil { + return err + } + if trashed { + return os.ErrNotExist + } + + metadata["touch"] = fmt.Sprintf("%d", time.Now()) + return v.bsClient.SetBlobMetadata(v.containerName, loc, metadata, nil) } // Mtime returns the last-modified property of a block blob. func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) { + trashed, _, err := v.checkTrashed(loc) + if err != nil { + return time.Time{}, err + } + if trashed { + return time.Time{}, os.ErrNotExist + } + props, err := v.bsClient.GetBlobProperties(v.containerName, loc) if err != nil { return time.Time{}, err @@ -220,7 +322,8 @@ func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) { // container. func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error { params := storage.ListBlobsParameters{ - Prefix: prefix, + Prefix: prefix, + Include: "metadata", } for { resp, err := v.bsClient.ListBlobs(v.containerName, params) @@ -243,7 +346,11 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error { // value. continue } - fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.Unix()) + if b.Metadata["expires_at"] != "" { + // Trashed blob; exclude it from response + continue + } + fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.UnixNano()) } if resp.NextMarker == "" { return nil @@ -252,11 +359,12 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error { } } -// Delete a Keep block. -func (v *AzureBlobVolume) Delete(loc string) error { +// Trash a Keep block. +func (v *AzureBlobVolume) Trash(loc string) error { if v.readonly { return MethodDisabledError } + // Ideally we would use If-Unmodified-Since, but that // particular condition seems to be ignored by Azure. 
Instead, // we get the Etag before checking Mtime, and use If-Match to @@ -271,11 +379,40 @@ func (v *AzureBlobVolume) Delete(loc string) error { } else if time.Since(t) < blobSignatureTTL { return nil } - return v.bsClient.DeleteBlob(v.containerName, loc, map[string]string{ + + // If trashLifetime == 0, just delete it + if trashLifetime == 0 { + return v.bsClient.DeleteBlob(v.containerName, loc, map[string]string{ + "If-Match": props.Etag, + }) + } + + // Otherwise, mark as trash + return v.bsClient.SetBlobMetadata(v.containerName, loc, map[string]string{ + "expires_at": fmt.Sprintf("%d", time.Now().Add(trashLifetime).Unix()), + }, map[string]string{ "If-Match": props.Etag, }) } +// Untrash a Keep block. +// Delete the expires_at metadata attribute +func (v *AzureBlobVolume) Untrash(loc string) error { + // if expires_at does not exist, return NotFoundError + metadata, err := v.bsClient.GetBlobMetadata(v.containerName, loc) + if err != nil { + return v.translateError(err) + } + if metadata["expires_at"] == "" { + return os.ErrNotExist + } + + // reset expires_at metadata attribute + metadata["expires_at"] = "" + err = v.bsClient.SetBlobMetadata(v.containerName, loc, metadata, nil) + return v.translateError(err) +} + // Status returns a VolumeStatus struct with placeholder data. func (v *AzureBlobVolume) Status() *VolumeStatus { return &VolumeStatus{ @@ -308,7 +445,7 @@ func (v *AzureBlobVolume) translateError(err error) error { switch { case err == nil: return err - case strings.Contains(err.Error(), "404 Not Found"): + case strings.Contains(err.Error(), "Not Found"): // "storage: service returned without a response body (404 Not Found)" return os.ErrNotExist default: @@ -317,6 +454,58 @@ func (v *AzureBlobVolume) translateError(err error) error { } var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`) + func (v *AzureBlobVolume) isKeepBlock(s string) bool { return keepBlockRegexp.MatchString(s) } + +// EmptyTrash looks for trashed blocks that exceeded trashLifetime +// and deletes them from the volume. +func (v *AzureBlobVolume) EmptyTrash() { + var bytesDeleted, bytesInTrash int64 + var blocksDeleted, blocksInTrash int + params := storage.ListBlobsParameters{Include: "metadata"} + + for { + resp, err := v.bsClient.ListBlobs(v.containerName, params) + if err != nil { + log.Printf("EmptyTrash: ListBlobs: %v", err) + break + } + for _, b := range resp.Blobs { + // Check if the block is expired + if b.Metadata["expires_at"] == "" { + continue + } + + blocksInTrash++ + bytesInTrash += b.Properties.ContentLength + + expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64) + if err != nil { + log.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err) + continue + } + + if expiresAt > time.Now().Unix() { + continue + } + + err = v.bsClient.DeleteBlob(v.containerName, b.Name, map[string]string{ + "If-Match": b.Properties.Etag, + }) + if err != nil { + log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err) + continue + } + blocksDeleted++ + bytesDeleted += b.Properties.ContentLength + } + if resp.NextMarker == "" { + break + } + params.Marker = resp.NextMarker + } + + log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted) +}
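
Editor's note on the empty-block retry in Get() above: a freshly created blob can appear with zero length before CreateBlob commits its data, so Get() polls until either data shows up or a deadline passes. A minimal standalone sketch of that polling pattern follows; the two constants are deliberately short stand-ins for azureWriteRaceInterval and azureWriteRacePollTime, not the real values.

package main

import (
	"fmt"
	"time"
)

// Illustrative stand-ins for keepstore's azureWriteRaceInterval and
// azureWriteRacePollTime (kept short so the demo finishes quickly).
const (
	writeRaceInterval = 200 * time.Millisecond
	writeRacePollTime = 50 * time.Millisecond
)

// pollUntilNonEmpty keeps calling fetch while it returns zero bytes,
// giving a concurrent writer up to writeRaceInterval to commit its data,
// then returns whatever the last attempt produced.
func pollUntilNonEmpty(fetch func() (int, error)) (int, error) {
	deadline := time.Now().Add(writeRaceInterval)
	for {
		size, err := fetch()
		if err != nil || size > 0 || time.Now().After(deadline) {
			return size, err
		}
		time.Sleep(writeRacePollTime)
	}
}

func main() {
	// A fetch that never returns data: the helper gives up at the deadline.
	n, err := pollUntilNonEmpty(func() (int, error) { return 0, nil })
	fmt.Println(n, err)
}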
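
The new -azure-max-get-bytes flag drives the piece split in get(). As a worked example, taking BlockSize as 64 MiB (the in-code comment above assumes the same) and a hypothetical -azure-max-get-bytes of 16 MiB, the loop issues four concurrent range requests; this standalone sketch reproduces the boundary arithmetic and the inclusive byte-range strings passed to GetBlobRange:

package main

import "fmt"

func main() {
	const blockSize = 64 << 20   // 64 MiB, as assumed for BlockSize
	const maxGetBytes = 16 << 20 // hypothetical -azure-max-get-bytes value
	expectSize := blockSize
	pieces := (expectSize + maxGetBytes - 1) / maxGetBytes
	for p := 0; p < pieces; p++ {
		startPos := p * maxGetBytes
		endPos := startPos + maxGetBytes
		if endPos > expectSize {
			endPos = expectSize
		}
		// Same inclusive start-end format the diff builds for GetBlobRange.
		fmt.Printf("piece %d: Range %d-%d\n", p, startPos, endPos-1)
	}
	// Output:
	// piece 0: Range 0-16777215
	// piece 1: Range 16777216-33554431
	// piece 2: Range 33554432-50331647
	// piece 3: Range 50331648-67108863
}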
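
The trash support added here hinges on a single metadata key: a blob with a non-empty "expires_at" (a Unix-timestamp string) is treated as missing by Get/Compare/Touch/Mtime and skipped by IndexTo, Untrash clears the key, and EmptyTrash deletes blobs whose timestamp has passed. The sketch below shows just that convention over a plain metadata map; the map stands in for the blob metadata, with no Azure calls involved.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// trash marks metadata as trashed for lifetime, mirroring the expires_at
// convention above (empty string means "not trashed").
func trash(metadata map[string]string, lifetime time.Duration) {
	metadata["expires_at"] = fmt.Sprintf("%d", time.Now().Add(lifetime).Unix())
}

// untrash clears the marker, making the block live again.
func untrash(metadata map[string]string) { metadata["expires_at"] = "" }

// expired reports whether EmptyTrash would delete the blob now.
func expired(metadata map[string]string) bool {
	s := metadata["expires_at"]
	if s == "" {
		return false
	}
	t, err := strconv.ParseInt(s, 10, 64)
	return err == nil && t <= time.Now().Unix()
}

func main() {
	md := map[string]string{}
	trash(md, 2*time.Hour)               // example lifetime, not the default
	fmt.Println(expired(md))             // false until the lifetime passes
	untrash(md)
	fmt.Println(md["expires_at"] == "")  // true: block is live again
}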
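
Finally, the If-Match guard described in Trash(): because Azure appears to ignore If-Unmodified-Since, the code reads the Etag before checking Mtime and then deletes (or sets expires_at) conditionally, so a concurrent Put() or Touch() changes the Etag and turns the operation into a failure rather than a silent loss of fresh data. The sketch below shows the ordering against a hypothetical client interface and a fake in-memory implementation; it is an illustration of the pattern, not the storage SDK's API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// blobClient is a hypothetical subset of the storage client: only the
// calls relevant to the If-Match pattern appear here.
type blobClient interface {
	Etag(loc string) (string, error)
	Mtime(loc string) (time.Time, error)
	DeleteIfMatch(loc, etag string) error
}

// trashIfOld mirrors the ordering Trash() uses: read the Etag first, then
// check Mtime, then delete conditionally on the Etag still matching.
func trashIfOld(c blobClient, loc string, ttl time.Duration) error {
	etag, err := c.Etag(loc)
	if err != nil {
		return err
	}
	t, err := c.Mtime(loc)
	if err != nil {
		return err
	}
	if time.Since(t) < ttl {
		return nil // still within the signature TTL; leave the block alone
	}
	return c.DeleteIfMatch(loc, etag)
}

// fakeClient simulates a blob whose Etag changed after we read it.
type fakeClient struct{ current string }

func (f fakeClient) Etag(string) (string, error)     { return "etag-1", nil }
func (f fakeClient) Mtime(string) (time.Time, error) { return time.Now().Add(-time.Hour), nil }
func (f fakeClient) DeleteIfMatch(_, etag string) error {
	if etag != f.current {
		return errors.New("412 precondition failed")
	}
	return nil
}

func main() {
	// The blob was rewritten concurrently, so its Etag is now "etag-2" and
	// the conditional delete refuses to remove the new data.
	fmt.Println(trashIfOld(fakeClient{current: "etag-2"}, "loc", time.Minute))
}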