19 "git.curoverse.com/arvados.git/sdk/go/arvados"
20 log "github.com/Sirupsen/logrus"
21 "github.com/curoverse/azure-sdk-for-go/storage"
// Default HTTP client timeout applied to Azure storage requests when
// the volume config does not specify one.
24 const azureDefaultRequestTimeout = arvados.Duration(10 * time.Minute)
// Package-level settings filled in by command-line flags (see the flag
// registration below). NOTE(review): the enclosing var(...) block is not
// visible in this fragment.
28 azureStorageAccountName string
29 azureStorageAccountKeyFile string
30 azureStorageReplication int
// Window during which an empty blob is treated as a possible
// CreateBlob race (see Get), and the poll interval used while waiting.
31 azureWriteRaceInterval = 15 * time.Second
32 azureWriteRacePollTime = time.Second
// readKeyFromFile reads an Azure storage account key from the given
// file, trimming surrounding whitespace. It returns an error if the
// file cannot be read or contains only whitespace.
35 func readKeyFromFile(file string) (string, error) {
36 buf, err := ioutil.ReadFile(file)
// (error-check line not visible in this fragment)
38 return "", errors.New("reading key from " + file + ": " + err.Error())
40 accountKey := strings.TrimSpace(string(buf))
// Reject files that are empty after trimming.
42 return "", errors.New("empty account key in " + file)
44 return accountKey, nil
// azureVolumeAdder is a flag.Value that appends an AzureBlobVolume to
// the active configuration each time Set is called.
47 type azureVolumeAdder struct {
51 // String implements flag.Value
52 func (s *azureVolumeAdder) String() string {
// Set implements flag.Value: each occurrence of the
// -azure-storage-container-volume flag adds one AzureBlobVolume,
// capturing whatever account name/key-file/replication flags were
// given earlier on the command line.
56 func (s *azureVolumeAdder) Set(containerName string) error {
57 s.Config.Volumes = append(s.Config.Volumes, &AzureBlobVolume{
58 ContainerName: containerName,
59 StorageAccountName: azureStorageAccountName,
60 StorageAccountKeyFile: azureStorageAccountKeyFile,
61 AzureReplication: azureStorageReplication,
62 ReadOnly: deprecated.flagReadonly,
// Registration of the Azure volume type and its command-line flags.
// NOTE(review): the enclosing function header (presumably func init())
// is not visible in this fragment.
68 VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &AzureBlobVolume{} })
70 flag.Var(&azureVolumeAdder{theConfig},
71 "azure-storage-container-volume",
72 "Use the given container as a storage volume. Can be given multiple times.")
74 &azureStorageAccountName,
75 "azure-storage-account-name",
77 "Azure storage account name used for subsequent --azure-storage-container-volume arguments.")
79 &azureStorageAccountKeyFile,
80 "azure-storage-account-key-file",
82 "`File` containing the account key used for subsequent --azure-storage-container-volume arguments.")
84 &azureStorageReplication,
85 "azure-storage-replication",
87 "Replication level to report to clients when data is stored in an Azure container.")
// Tunable GET chunk size; see get() for the multi-range fan-out.
90 "azure-max-get-bytes",
92 fmt.Sprintf("Maximum bytes to request in a single GET request. If smaller than %d, use multiple concurrent range requests to retrieve a block.", BlockSize))
95 // An AzureBlobVolume stores and retrieves blocks in an Azure Blob
// container, one block blob per Keep block.
97 type AzureBlobVolume struct {
98 StorageAccountName string
99 StorageAccountKeyFile string
// Per-request timeout; defaults to azureDefaultRequestTimeout in Start().
103 RequestTimeout arvados.Duration
// Clients are initialized by Start(), not by the config loader.
105 azClient storage.Client
106 bsClient *azureBlobClient
109 // azureBlobClient wraps storage.BlobStorageClient in order to count
110 // I/O and API usage stats.
111 type azureBlobClient struct {
112 client *storage.BlobStorageClient
// azureBlobStats holds the per-operation counters exposed via
// InternalStats(). (Other counter fields are not visible here.)
116 type azureBlobStats struct {
122 SetMetadataOps uint64
129 // Examples implements VolumeWithExamples.
// Returns sample volume configurations for documentation/config output.
130 func (*AzureBlobVolume) Examples() []Volume {
133 StorageAccountName: "example-account-name",
134 StorageAccountKeyFile: "/etc/azure_storage_account_key.txt",
135 ContainerName: "example-container-name",
137 RequestTimeout: azureDefaultRequestTimeout,
142 // Type implements Volume.
143 func (v *AzureBlobVolume) Type() string {
147 // Start implements Volume.
// Start validates the configuration, reads the account key, creates
// the Azure storage client (with RequestTimeout applied to its HTTP
// client), and verifies that the configured container exists.
148 func (v *AzureBlobVolume) Start() error {
149 if v.ContainerName == "" {
150 return errors.New("no container name given")
152 if v.StorageAccountName == "" || v.StorageAccountKeyFile == "" {
153 return errors.New("StorageAccountName and StorageAccountKeyFile must be given")
// Key is kept in a file rather than the config itself.
155 accountKey, err := readKeyFromFile(v.StorageAccountKeyFile)
159 v.azClient, err = storage.NewBasicClient(v.StorageAccountName, accountKey)
161 return fmt.Errorf("creating Azure storage client: %s", err)
// Apply the default timeout if none was configured.
164 if v.RequestTimeout == 0 {
165 v.RequestTimeout = azureDefaultRequestTimeout
167 v.azClient.HTTPClient = &http.Client{
168 Timeout: time.Duration(v.RequestTimeout),
// Wrap the blob service client so API/IO stats are counted.
170 bs := v.azClient.GetBlobService()
171 v.bsClient = &azureBlobClient{
// Fail fast if the container is missing rather than erroring on
// every subsequent request.
175 ok, err := v.bsClient.ContainerExists(v.ContainerName)
180 return fmt.Errorf("Azure container %q does not exist", v.ContainerName)
185 // Return true if expires_at metadata attribute is found on the block
// checkTrashed fetches the blob's metadata and reports whether the
// block has been trashed (i.e., Trash() stamped it with a non-empty
// "expires_at"). The metadata map is returned so callers can reuse it.
186 func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
187 metadata, err := v.bsClient.GetBlobMetadata(v.ContainerName, loc)
189 return false, metadata, v.translateError(err)
191 if metadata["expires_at"] != "" {
192 return true, metadata, nil
194 return false, metadata, nil
197 // Get reads a Keep block that has been stored as a block blob in the
// container, copying it into buf and returning the number of bytes.
200 // If the block is younger than azureWriteRaceInterval and is
201 // unexpectedly empty, assume a PutBlob operation is in progress, and
202 // wait for it to finish writing.
203 func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
// Trashed blocks are reported as nonexistent.
204 trashed, _, err := v.checkTrashed(loc)
209 return 0, os.ErrNotExist
211 var deadline time.Time
212 haveDeadline := false
213 size, err := v.get(ctx, loc, buf)
// The empty-block retry loop below is skipped for the known empty
// block (d41d8...), whose size really is 0.
214 for err == nil && size == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" {
215 // Seeing a brand new empty block probably means we're
216 // in a race with CreateBlob, which under the hood
217 // (apparently) does "CreateEmpty" and "CommitData"
218 // with no additional transaction locking.
220 t, err := v.Mtime(loc)
222 log.Print("Got empty block (possible race) but Mtime failed: ", err)
// Poll until azureWriteRaceInterval past the blob's mtime.
225 deadline = t.Add(azureWriteRaceInterval)
226 if time.Now().After(deadline) {
229 log.Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", loc, time.Since(t), deadline)
231 } else if time.Now().After(deadline) {
// (select over ctx.Done() presumably occupies the missing lines here)
237 case <-time.After(azureWriteRacePollTime):
239 size, err = v.get(ctx, loc, buf)
242 log.Printf("Race ended with size==%d", size)
// get performs the actual blob read. When azureMaxGetBytes < BlockSize
// the block is fetched as multiple concurrent range requests (one
// goroutine per piece) written directly into disjoint slices of buf;
// otherwise a single GetBlob call is used. Returns the number of bytes
// actually read.
247 func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int, error) {
// Derived ctx lets us abort all in-flight pieces on first failure.
248 ctx, cancel := context.WithCancel(ctx)
250 expectSize := len(buf)
251 if azureMaxGetBytes < BlockSize {
252 // Unfortunately the handler doesn't tell us how long the blob
253 // is expected to be, so we have to ask Azure.
254 props, err := v.bsClient.GetBlobProperties(v.ContainerName, loc)
256 return 0, v.translateError(err)
258 if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
259 return 0, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize)
261 expectSize = int(props.ContentLength)
268 // We'll update this actualSize if/when we get the last piece.
// Ceiling division: number of range requests needed.
270 pieces := (expectSize + azureMaxGetBytes - 1) / azureMaxGetBytes
// Buffered so a failing piece never blocks on send.
271 errors := make(chan error, pieces)
272 var wg sync.WaitGroup
274 for p := 0; p < pieces; p++ {
275 // Each goroutine retrieves one piece. If we hit an
276 // error, it is sent to the errors chan so get() can
277 // return it -- but only if the error happens before
278 // ctx is done. This way, if ctx is done before we hit
279 // any other error (e.g., requesting client has hung
280 // up), we return the original ctx.Err() instead of
281 // the secondary errors from the transfers that got
282 // interrupted as a result.
285 startPos := p * azureMaxGetBytes
286 endPos := startPos + azureMaxGetBytes
287 if endPos > expectSize {
290 var rdr io.ReadCloser
292 gotRdr := make(chan struct{})
// Whole-blob GET for the single-piece case; range GET otherwise.
295 if startPos == 0 && endPos == expectSize {
296 rdr, err = v.bsClient.GetBlob(v.ContainerName, loc)
298 rdr, err = v.bsClient.GetBlobRange(v.ContainerName, loc, fmt.Sprintf("%d-%d", startPos, endPos-1), nil)
318 // Close the reader when the client
319 // hangs up or another piece fails
320 // (possibly interrupting ReadFull())
321 // or when all pieces succeed and
326 n, err := io.ReadFull(rdr, buf[startPos:endPos])
327 if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
328 // If we don't know the actual size,
329 // and just tried reading 64 MiB, it's
330 // normal to encounter EOF.
331 } else if err != nil {
// Only report errors that happened before cancellation;
// otherwise ctx.Err() is the interesting error.
332 if ctx.Err() == nil {
339 actualSize = startPos + n
// After wg.Wait() (not visible), surface the first piece error…
346 return 0, v.translateError(<-errors)
// …or the context error if the caller hung up.
348 if ctx.Err() != nil {
351 return actualSize, nil
354 // Compare the given data with existing stored data.
// Returns os.ErrNotExist for trashed blocks; otherwise streams the
// blob and compares it against expect (delegating to
// compareReaderWithBuf, which also verifies the content hash loc[:32]).
355 func (v *AzureBlobVolume) Compare(ctx context.Context, loc string, expect []byte) error {
356 trashed, _, err := v.checkTrashed(loc)
361 return os.ErrNotExist
363 var rdr io.ReadCloser
// gotRdr lets a watcher goroutine (not visible here) know when the
// reader is available, e.g. to close it on ctx cancellation.
364 gotRdr := make(chan struct{})
367 rdr, err = v.bsClient.GetBlob(v.ContainerName, loc)
381 return v.translateError(err)
384 return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
387 // Put stores a Keep block as a block blob in the container.
// Returns MethodDisabledError on read-only volumes. Honors ctx
// cancellation by poisoning the pipe feeding the upload goroutine.
388 func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) error {
390 return MethodDisabledError
392 // Send the block data through a pipe, so that (if we need to)
393 // we can close the pipe early and abandon our
394 // CreateBlockBlobFromReader() goroutine, without worrying
395 // about CreateBlockBlobFromReader() accessing our block
396 // buffer after we release it.
397 bufr, bufw := io.Pipe()
// (runs in a goroutine started on a missing line)
399 io.Copy(bufw, bytes.NewReader(block))
402 errChan := make(chan error)
404 errChan <- v.bsClient.CreateBlockBlobFromReader(v.ContainerName, loc, uint64(len(block)), bufr, nil)
// ctx-cancelled branch of a select (header not visible):
408 theConfig.debugLogf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
409 // Our pipe might be stuck in Write(), waiting for
410 // io.Copy() to read. If so, un-stick it. This means
411 // CreateBlockBlobFromReader will get corrupt data,
412 // but that's OK: the size won't match, so the write
414 go io.Copy(ioutil.Discard, bufr)
415 // CloseWithError() will return once pending I/O is done.
416 bufw.CloseWithError(ctx.Err())
417 theConfig.debugLogf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
// Normal completion: propagate the upload result.
419 case err := <-errChan:
424 // Touch updates the last-modified property of a block blob.
// Touch rewrites the blob's metadata (with a fresh "touch" value) so
// Azure refreshes its Last-Modified timestamp. Returns
// MethodDisabledError on read-only volumes and os.ErrNotExist for
// trashed blocks.
425 func (v *AzureBlobVolume) Touch(loc string) error {
427 return MethodDisabledError
429 trashed, metadata, err := v.checkTrashed(loc)
434 return os.ErrNotExist
// Fix: %d cannot format a time.Time (go vet printf error; the stored
// value would be the struct's %!d(...) rendering). Store the Unix
// timestamp instead, matching the numeric format used for expires_at.
437 metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
438 return v.bsClient.SetBlobMetadata(v.ContainerName, loc, metadata, nil)
441 // Mtime returns the last-modified property of a block blob.
// Trashed blocks are reported as nonexistent. The timestamp is parsed
// from Azure's RFC1123-formatted LastModified property.
442 func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
443 trashed, _, err := v.checkTrashed(loc)
445 return time.Time{}, err
448 return time.Time{}, os.ErrNotExist
451 props, err := v.bsClient.GetBlobProperties(v.ContainerName, loc)
453 return time.Time{}, err
455 return time.Parse(time.RFC1123, props.LastModified)
458 // IndexTo writes a list of Keep blocks that are stored in the
// container, one "<hash>+<size> <unixnano-mtime>" line per block,
// paging through ListBlobs results via NextMarker. Trashed blocks,
// non-Keep blob names, and possibly-in-flight empty blobs are skipped.
460 func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
461 params := storage.ListBlobsParameters{
466 resp, err := v.bsClient.ListBlobs(v.ContainerName, params)
470 for _, b := range resp.Blobs {
471 t, err := time.Parse(time.RFC1123, b.Properties.LastModified)
// Ignore blobs whose names are not 32-hex-digit Keep hashes.
475 if !v.isKeepBlock(b.Name) {
478 if b.Properties.ContentLength == 0 && t.Add(azureWriteRaceInterval).After(time.Now()) {
479 // A new zero-length blob is probably
480 // just a new non-empty blob that
481 // hasn't committed its data yet (see
482 // Get()), and in any case has no
486 if b.Metadata["expires_at"] != "" {
487 // Trashed blob; exclude it from response
490 fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.UnixNano())
// Empty NextMarker means the listing is complete.
492 if resp.NextMarker == "" {
495 params.Marker = resp.NextMarker
499 // Trash a Keep block.
// If TrashLifetime is zero the blob is deleted outright; otherwise it
// is marked with an "expires_at" Unix timestamp for EmptyTrash to
// reap later. Blocks newer than BlobSignatureTTL are left alone.
500 func (v *AzureBlobVolume) Trash(loc string) error {
502 return MethodDisabledError
505 // Ideally we would use If-Unmodified-Since, but that
506 // particular condition seems to be ignored by Azure. Instead,
507 // we get the Etag before checking Mtime, and use If-Match to
508 // ensure we don't delete data if Put() or Touch() happens
509 // between our calls to Mtime() and DeleteBlob().
510 props, err := v.bsClient.GetBlobProperties(v.ContainerName, loc)
514 if t, err := v.Mtime(loc); err != nil {
516 } else if time.Since(t) < theConfig.BlobSignatureTTL.Duration() {
520 // If TrashLifetime == 0, just delete it
521 if theConfig.TrashLifetime == 0 {
522 return v.bsClient.DeleteBlob(v.ContainerName, loc, map[string]string{
523 "If-Match": props.Etag,
527 // Otherwise, mark as trash
528 return v.bsClient.SetBlobMetadata(v.ContainerName, loc, map[string]string{
529 "expires_at": fmt.Sprintf("%d", time.Now().Add(theConfig.TrashLifetime.Duration()).Unix()),
530 }, map[string]string{
531 "If-Match": props.Etag,
535 // Untrash a Keep block.
536 // Delete the expires_at metadata attribute
537 func (v *AzureBlobVolume) Untrash(loc string) error {
538 // if expires_at does not exist, return NotFoundError
539 metadata, err := v.bsClient.GetBlobMetadata(v.ContainerName, loc)
541 return v.translateError(err)
543 if metadata["expires_at"] == "" {
544 return os.ErrNotExist
547 // reset expires_at metadata attribute
// Clearing the value (rather than deleting the key) is how a block
// is marked not-trashed; checkTrashed tests for a non-empty value.
548 metadata["expires_at"] = ""
549 err = v.bsClient.SetBlobMetadata(v.ContainerName, loc, metadata, nil)
550 return v.translateError(err)
553 // Status returns a VolumeStatus struct with placeholder data.
// BytesFree is a fixed placeholder (Azure containers have no
// meaningful free-space figure).
554 func (v *AzureBlobVolume) Status() *VolumeStatus {
555 return &VolumeStatus{
557 BytesFree: BlockSize * 1000,
562 // String returns a volume label, including the container name.
563 func (v *AzureBlobVolume) String() string {
564 return fmt.Sprintf("azure-storage-container:%+q", v.ContainerName)
567 // Writable returns true, unless the -readonly flag was on when the
// volume was configured. (Body not visible in this fragment.)
569 func (v *AzureBlobVolume) Writable() bool {
573 // Replication returns the replication level of the container, as
574 // specified by the -azure-storage-replication argument.
575 func (v *AzureBlobVolume) Replication() int {
576 return v.AzureReplication
579 // If possible, translate an Azure SDK error to a recognizable error
580 // like os.ErrNotExist.
// Matching on the error string is brittle but unavoidable: the SDK
// does not expose a typed 404 for this case.
581 func (v *AzureBlobVolume) translateError(err error) error {
585 case strings.Contains(err.Error(), "Not Found"):
586 // "storage: service returned without a response body (404 Not Found)"
587 return os.ErrNotExist
// A Keep block is named by its MD5 hash: exactly 32 lowercase hex digits.
593 var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
// isKeepBlock reports whether s looks like a Keep block locator hash.
595 func (v *AzureBlobVolume) isKeepBlock(s string) bool {
596 return keepBlockRegexp.MatchString(s)
599 // EmptyTrash looks for trashed blocks that exceeded TrashLifetime
600 // and deletes them from the volume.
// Pages through all blobs (metadata included), deleting those whose
// "expires_at" is in the past, and logs summary statistics at the end.
601 func (v *AzureBlobVolume) EmptyTrash() {
602 var bytesDeleted, bytesInTrash int64
603 var blocksDeleted, blocksInTrash int
604 params := storage.ListBlobsParameters{Include: "metadata"}
607 resp, err := v.bsClient.ListBlobs(v.ContainerName, params)
// Listing errors abort the sweep; the next scheduled run retries.
609 log.Printf("EmptyTrash: ListBlobs: %v", err)
612 for _, b := range resp.Blobs {
613 // Check if the block is expired
// Blobs without expires_at are live blocks, not trash; skip them.
614 if b.Metadata["expires_at"] == "" {
619 bytesInTrash += b.Properties.ContentLength
621 expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64)
623 log.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err)
// Still within its trash lifetime; leave it for a later sweep.
627 if expiresAt > time.Now().Unix() {
// If-Match guards against deleting a blob that was untrashed or
// rewritten since we listed it.
631 err = v.bsClient.DeleteBlob(v.ContainerName, b.Name, map[string]string{
632 "If-Match": b.Properties.Etag,
635 log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
639 bytesDeleted += b.Properties.ContentLength
641 if resp.NextMarker == "" {
644 params.Marker = resp.NextMarker
647 log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
650 // InternalStats returns bucket I/O and API call counters.
651 func (v *AzureBlobVolume) InternalStats() interface{} {
652 return &v.bsClient.stats
// The methods below wrap the underlying storage.BlobStorageClient
// one-for-one, ticking the relevant stats counters before delegating.
// Readers returned by GetBlob/GetBlobRange are wrapped in a
// CountingReader so bytes-in are tallied as they are consumed;
// CreateBlockBlobFromReader wraps its input to tally bytes-out.
655 func (c *azureBlobClient) ContainerExists(cname string) (bool, error) {
656 c.stats.Tick(&c.stats.Ops)
657 ok, err := c.client.ContainerExists(cname)
662 func (c *azureBlobClient) GetBlobMetadata(cname, bname string) (map[string]string, error) {
663 c.stats.Tick(&c.stats.Ops)
664 m, err := c.client.GetBlobMetadata(cname, bname)
669 func (c *azureBlobClient) GetBlobProperties(cname, bname string) (*storage.BlobProperties, error) {
670 c.stats.Tick(&c.stats.Ops)
671 p, err := c.client.GetBlobProperties(cname, bname)
676 func (c *azureBlobClient) GetBlob(cname, bname string) (io.ReadCloser, error) {
677 c.stats.Tick(&c.stats.Ops, &c.stats.GetOps)
678 rdr, err := c.client.GetBlob(cname, bname)
680 return NewCountingReader(rdr, c.stats.TickInBytes), err
683 func (c *azureBlobClient) GetBlobRange(cname, bname, byterange string, hdrs map[string]string) (io.ReadCloser, error) {
684 c.stats.Tick(&c.stats.Ops, &c.stats.GetRangeOps)
685 rdr, err := c.client.GetBlobRange(cname, bname, byterange, hdrs)
687 return NewCountingReader(rdr, c.stats.TickInBytes), err
690 func (c *azureBlobClient) CreateBlockBlobFromReader(cname, bname string, size uint64, rdr io.Reader, hdrs map[string]string) error {
691 c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
692 rdr = NewCountingReader(rdr, c.stats.TickOutBytes)
693 err := c.client.CreateBlockBlobFromReader(cname, bname, size, rdr, hdrs)
698 func (c *azureBlobClient) SetBlobMetadata(cname, bname string, m, hdrs map[string]string) error {
699 c.stats.Tick(&c.stats.Ops, &c.stats.SetMetadataOps)
700 err := c.client.SetBlobMetadata(cname, bname, m, hdrs)
705 func (c *azureBlobClient) ListBlobs(cname string, params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
706 c.stats.Tick(&c.stats.Ops, &c.stats.ListOps)
707 resp, err := c.client.ListBlobs(cname, params)
712 func (c *azureBlobClient) DeleteBlob(cname, bname string, hdrs map[string]string) error {
713 c.stats.Tick(&c.stats.Ops, &c.stats.DelOps)
714 err := c.client.DeleteBlob(cname, bname, hdrs)