1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
24 "git.curoverse.com/arvados.git/sdk/go/arvados"
25 "github.com/Azure/azure-sdk-for-go/storage"
26 "github.com/prometheus/client_golang/prometheus"
// Default tuning values; Start() substitutes these when the
// corresponding config fields are zero.
azureDefaultRequestTimeout = arvados.Duration(10 * time.Minute)
azureDefaultListBlobsMaxAttempts = 12
azureDefaultListBlobsRetryDelay = arvados.Duration(10 * time.Second)

// Targets for the -azure-storage-* command line flags; consumed by
// azureVolumeAdder.Set() when building volumes from CLI arguments.
azureStorageAccountName string
azureStorageAccountKeyFile string
azureStorageReplication int

// A blob can look empty while a concurrent write is still committing
// its data. Get() treats a zero-length blob younger than
// azureWriteRaceInterval as a possible write race, polling every
// azureWriteRacePollTime until the data appears or the interval
// expires.
azureWriteRaceInterval = 15 * time.Second
azureWriteRacePollTime = time.Second
// readKeyFromFile loads an Azure storage account key from the named
// file, trimming surrounding whitespace. It returns an error if the
// file cannot be read or contains nothing but whitespace.
func readKeyFromFile(file string) (string, error) {
	raw, err := ioutil.ReadFile(file)
	if err != nil {
		return "", errors.New("reading key from " + file + ": " + err.Error())
	}
	key := strings.TrimSpace(string(raw))
	if len(key) == 0 {
		return "", errors.New("empty account key in " + file)
	}
	return key, nil
}
// azureVolumeAdder is a flag.Value that appends an AzureBlobVolume to
// theConfig.Volumes for each -azure-storage-container-volume argument.
type azureVolumeAdder struct {

// String implements flag.Value
func (s *azureVolumeAdder) String() string {

// Set appends a new AzureBlobVolume for containerName, picking up the
// account name, key file, replication, and readonly values from the
// flags seen so far on the command line.
func (s *azureVolumeAdder) Set(containerName string) error {
	s.Config.Volumes = append(s.Config.Volumes, &AzureBlobVolume{
		ContainerName: containerName,
		StorageAccountName: azureStorageAccountName,
		StorageAccountKeyFile: azureStorageAccountKeyFile,
		AzureReplication: azureStorageReplication,
		ReadOnly: deprecated.flagReadonly,
	// Register the Azure volume type so it appears in example/config
	// output.
	VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &AzureBlobVolume{} })

	flag.Var(&azureVolumeAdder{theConfig},
		"azure-storage-container-volume",
		"Use the given container as a storage volume. Can be given multiple times.")
	// NOTE(review): the flag.StringVar/IntVar call sites below appear
	// truncated in this excerpt; only their argument lists are visible.
		&azureStorageAccountName,
		"azure-storage-account-name",
		"Azure storage account name used for subsequent --azure-storage-container-volume arguments.")
		&azureStorageAccountKeyFile,
		"azure-storage-account-key-file",
		"`File` containing the account key used for subsequent --azure-storage-container-volume arguments.")
		&azureStorageReplication,
		"azure-storage-replication",
		"Replication level to report to clients when data is stored in an Azure container.")
		"azure-max-get-bytes",
		fmt.Sprintf("Maximum bytes to request in a single GET request. If smaller than %d, use multiple concurrent range requests to retrieve a block.", BlockSize))
// An AzureBlobVolume stores and retrieves blocks in an Azure Blob
// container.
type AzureBlobVolume struct {
	StorageAccountName string
	StorageAccountKeyFile string
	StorageBaseURL string // "" means default, "core.windows.net"
	// RequestTimeout bounds each HTTP request; 0 means use
	// azureDefaultRequestTimeout (see Start()).
	RequestTimeout arvados.Duration
	StorageClasses []string
	// ListBlobs retry knobs; zero values get defaults in Start().
	ListBlobsRetryDelay arvados.Duration
	ListBlobsMaxAttempts int

	// Internal state, initialized by Start().
	azClient storage.Client
	container *azureContainer
122 // singleSender is a single-attempt storage.Sender.
123 type singleSender struct{}
125 // Send performs req exactly once.
126 func (*singleSender) Send(c *storage.Client, req *http.Request) (resp *http.Response, err error) {
127 return c.HTTPClient.Do(req)
// Examples implements VolumeWithExamples.
func (*AzureBlobVolume) Examples() []Volume {
	// Public-cloud example (default StorageBaseURL).
		StorageAccountName: "example-account-name",
		StorageAccountKeyFile: "/etc/azure_storage_account_key.txt",
		ContainerName: "example-container-name",
		RequestTimeout: azureDefaultRequestTimeout,
	// Sovereign-cloud example with an explicit StorageBaseURL.
		StorageAccountName: "cn-account-name",
		StorageAccountKeyFile: "/etc/azure_cn_storage_account_key.txt",
		StorageBaseURL: "core.chinacloudapi.cn",
		ContainerName: "cn-container-name",
		RequestTimeout: azureDefaultRequestTimeout,
// Type implements Volume.
// NOTE(review): the return statement is not visible in this excerpt.
func (v *AzureBlobVolume) Type() string {
// Start implements Volume. It applies default tuning values, validates
// the configuration, builds the Azure storage client, verifies the
// container exists, and registers prometheus counters.
func (v *AzureBlobVolume) Start(vm *volumeMetricsVecs) error {
	// Fill in defaults for unset tuning knobs.
	if v.ListBlobsRetryDelay == 0 {
		v.ListBlobsRetryDelay = azureDefaultListBlobsRetryDelay
	if v.ListBlobsMaxAttempts == 0 {
		v.ListBlobsMaxAttempts = azureDefaultListBlobsMaxAttempts
	// Required configuration.
	if v.ContainerName == "" {
		return errors.New("no container name given")
	if v.StorageAccountName == "" || v.StorageAccountKeyFile == "" {
		return errors.New("StorageAccountName and StorageAccountKeyFile must be given")
	accountKey, err := readKeyFromFile(v.StorageAccountKeyFile)
	if v.StorageBaseURL == "" {
		v.StorageBaseURL = storage.DefaultBaseURL
	v.azClient, err = storage.NewClient(v.StorageAccountName, accountKey, v.StorageBaseURL, storage.DefaultAPIVersion, true)
		return fmt.Errorf("creating Azure storage client: %s", err)
	// Single-attempt sender: retries are handled by our callers, not
	// the SDK.
	v.azClient.Sender = &singleSender{}
	if v.RequestTimeout == 0 {
		v.RequestTimeout = azureDefaultRequestTimeout
	// Overall per-request timeout on the HTTP client.
	v.azClient.HTTPClient = &http.Client{
		Timeout: time.Duration(v.RequestTimeout),
	bs := v.azClient.GetBlobService()
	v.container = &azureContainer{
		ctr: bs.GetContainerReference(v.ContainerName),
	// Fail fast if the configured container is missing.
	if ok, err := v.container.Exists(); err != nil {
		return fmt.Errorf("Azure container %q does not exist", v.ContainerName)
	// Set up prometheus metrics
	lbls := prometheus.Labels{"device_id": v.DeviceID()}
	v.container.stats.opsCounters, v.container.stats.errCounters, v.container.stats.ioBytes = vm.getCounterVecsFor(lbls)
206 // DeviceID returns a globally unique ID for the storage container.
207 func (v *AzureBlobVolume) DeviceID() string {
208 return "azure://" + v.StorageBaseURL + "/" + v.StorageAccountName + "/" + v.ContainerName
211 // Return true if expires_at metadata attribute is found on the block
212 func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
213 metadata, err := v.container.GetBlobMetadata(loc)
215 return false, metadata, v.translateError(err)
217 if metadata["expires_at"] != "" {
218 return true, metadata, nil
220 return false, metadata, nil
// Get reads a Keep block that has been stored as a block blob in the
// container, filling buf and returning the data size.
//
// If the block is younger than azureWriteRaceInterval and is
// unexpectedly empty, assume a PutBlob operation is in progress, and
// wait for it to finish writing.
func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
	trashed, _, err := v.checkTrashed(loc)
	// Trashed blocks are reported as nonexistent.
		return 0, os.ErrNotExist
	var deadline time.Time
	haveDeadline := false
	size, err := v.get(ctx, loc, buf)
	// "d41d8..." is the md5 of the empty string, i.e. the one block
	// that is legitimately zero-length.
	for err == nil && size == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" {
		// Seeing a brand new empty block probably means we're
		// in a race with CreateBlob, which under the hood
		// (apparently) does "CreateEmpty" and "CommitData"
		// with no additional transaction locking.
		t, err := v.Mtime(loc)
			log.Print("Got empty block (possible race) but Mtime failed: ", err)
		// Poll until the block is azureWriteRaceInterval old.
		deadline = t.Add(azureWriteRaceInterval)
		if time.Now().After(deadline) {
		log.Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", loc, time.Since(t), deadline)
		} else if time.Now().After(deadline) {
		// Wait one poll interval (or give up if ctx is done),
		// then re-read.
		case <-time.After(azureWriteRacePollTime):
		size, err = v.get(ctx, loc, buf)
	// Loop exited: data appeared, an error occurred, or the deadline
	// passed.
		log.Printf("Race ended with size==%d", size)
// get retrieves the blob at loc into buf, using multiple concurrent
// range requests when azureMaxGetBytes < BlockSize. It returns the
// number of bytes stored in buf.
func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int, error) {
	ctx, cancel := context.WithCancel(ctx)
	expectSize := len(buf)
	if azureMaxGetBytes < BlockSize {
		// Unfortunately the handler doesn't tell us how long the blob
		// is expected to be, so we have to ask Azure.
		props, err := v.container.GetBlobProperties(loc)
			return 0, v.translateError(err)
		// Sanity-check the reported size before trusting it.
		if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
			return 0, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize)
		expectSize = int(props.ContentLength)

	// We'll update this actualSize if/when we get the last piece.
	// Split the read into ceil(expectSize/azureMaxGetBytes) pieces.
	pieces := (expectSize + azureMaxGetBytes - 1) / azureMaxGetBytes
	errors := make(chan error, pieces)
	var wg sync.WaitGroup
	for p := 0; p < pieces; p++ {
		// Each goroutine retrieves one piece. If we hit an
		// error, it is sent to the errors chan so get() can
		// return it -- but only if the error happens before
		// ctx is done. This way, if ctx is done before we hit
		// any other error (e.g., requesting client has hung
		// up), we return the original ctx.Err() instead of
		// the secondary errors from the transfers that got
		// interrupted as a result.
		startPos := p * azureMaxGetBytes
		endPos := startPos + azureMaxGetBytes
		// Clamp the last piece to the expected size.
		if endPos > expectSize {
		var rdr io.ReadCloser
		gotRdr := make(chan struct{})
		// Single piece covering the whole blob: plain GET;
		// otherwise a ranged GET for [startPos, endPos).
		if startPos == 0 && endPos == expectSize {
			rdr, err = v.container.GetBlob(loc)
			rdr, err = v.container.GetBlobRange(loc, startPos, endPos-1, nil)
		// Close the reader when the client
		// hangs up or another piece fails
		// (possibly interrupting ReadFull())
		// or when all pieces succeed and
		// the read completes normally.
		n, err := io.ReadFull(rdr, buf[startPos:endPos])
		if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
			// If we don't know the actual size,
			// and just tried reading 64 MiB, it's
			// normal to encounter EOF.
		} else if err != nil {
			if ctx.Err() == nil {
		// This piece ended the data: record the total size.
		actualSize = startPos + n
	// Return the first piece error, translated, if any occurred.
		return 0, v.translateError(<-errors)
	if ctx.Err() != nil {
	return actualSize, nil
// Compare the given data with existing stored data.
func (v *AzureBlobVolume) Compare(ctx context.Context, loc string, expect []byte) error {
	trashed, _, err := v.checkTrashed(loc)
	// Trashed blocks are reported as nonexistent.
		return os.ErrNotExist
	// NOTE(review): the blob reader is presumably fetched in a
	// goroutine signaled via gotRdr so ctx cancellation can be
	// honored (the intervening lines are not visible in this excerpt).
	var rdr io.ReadCloser
	gotRdr := make(chan struct{})
		rdr, err = v.container.GetBlob(loc)
	return v.translateError(err)
	// loc[:32] is the md5 part of the locator, used to verify the
	// streamed content.
	return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
// Put stores a Keep block as a block blob in the container.
func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) error {
	// Read-only volumes reject writes.
		return MethodDisabledError
	// Send the block data through a pipe, so that (if we need to)
	// we can close the pipe early and abandon our
	// CreateBlockBlobFromReader() goroutine, without worrying
	// about CreateBlockBlobFromReader() accessing our block
	// buffer after we release it.
	bufr, bufw := io.Pipe()
		io.Copy(bufw, bytes.NewReader(block))
	errChan := make(chan error)
		var body io.Reader = bufr
		// We must send a "Content-Length: 0" header,
		// but the http client interprets
		// ContentLength==0 as "unknown" unless it can
		// confirm by introspection that Body will
		// read 0 bytes.
		errChan <- v.container.CreateBlockBlobFromReader(loc, len(block), body, nil)
	// Wait for the upload to finish, or for ctx to be canceled.
		theConfig.debugLogf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
		// Our pipe might be stuck in Write(), waiting for
		// io.Copy() to read. If so, un-stick it. This means
		// CreateBlockBlobFromReader will get corrupt data,
		// but that's OK: the size won't match, so the write
		// will fail.
		go io.Copy(ioutil.Discard, bufr)
		// CloseWithError() will return once pending I/O is done.
		bufw.CloseWithError(ctx.Err())
		theConfig.debugLogf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
	case err := <-errChan:
// Touch updates the last-modified property of a block blob.
func (v *AzureBlobVolume) Touch(loc string) error {
	// Read-only volumes reject writes.
		return MethodDisabledError
	trashed, metadata, err := v.checkTrashed(loc)
	// Trashed blocks are reported as nonexistent.
		return os.ErrNotExist
	// Rewriting the blob's metadata bumps its last-modified time.
	metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
	return v.container.SetBlobMetadata(loc, metadata, nil)
477 // Mtime returns the last-modified property of a block blob.
478 func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
479 trashed, _, err := v.checkTrashed(loc)
481 return time.Time{}, err
484 return time.Time{}, os.ErrNotExist
487 props, err := v.container.GetBlobProperties(loc)
489 return time.Time{}, err
491 return time.Time(props.LastModified), nil
// IndexTo writes a list of Keep blocks that are stored in the
// container, one per line: "<locator>+<size> <mtime-nanoseconds>".
func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
	params := storage.ListBlobsParameters{
		Include: &storage.IncludeBlobDataset{Metadata: true},
	// Page through the full blob listing.
	for page := 1; ; page++ {
		resp, err := v.listBlobs(page, params)
		for _, b := range resp.Blobs {
			// Skip names that aren't Keep block locators.
			if !v.isKeepBlock(b.Name) {
			modtime := time.Time(b.Properties.LastModified)
			if b.Properties.ContentLength == 0 && modtime.Add(azureWriteRaceInterval).After(time.Now()) {
				// A new zero-length blob is probably
				// just a new non-empty blob that
				// hasn't committed its data yet (see
				// Get()), and in any case has no
				// value.
			if b.Metadata["expires_at"] != "" {
				// Trashed blob; exclude it from response
			fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, modtime.UnixNano())
		// An empty NextMarker means this was the last page.
		if resp.NextMarker == "" {
		params.Marker = resp.NextMarker
// call v.container.ListBlobs, retrying if needed.
// Retries up to v.ListBlobsMaxAttempts times, sleeping
// v.ListBlobsRetryDelay between attempts, while the service reports
// "server busy" (VolumeBusyError).
func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters) (resp storage.BlobListResponse, err error) {
	for i := 0; i < v.ListBlobsMaxAttempts; i++ {
		resp, err = v.container.ListBlobs(params)
		err = v.translateError(err)
		if err == VolumeBusyError {
			log.Printf("ListBlobs: will retry page %d in %s after error: %s", page, v.ListBlobsRetryDelay, err)
			time.Sleep(time.Duration(v.ListBlobsRetryDelay))
// Trash a Keep block. If TrashLifetime == 0 the blob is deleted
// outright; otherwise it is marked as trash by setting an expires_at
// metadata attribute.
func (v *AzureBlobVolume) Trash(loc string) error {
	// Read-only volumes reject writes.
		return MethodDisabledError
	// Ideally we would use If-Unmodified-Since, but that
	// particular condition seems to be ignored by Azure. Instead,
	// we get the Etag before checking Mtime, and use If-Match to
	// ensure we don't delete data if Put() or Touch() happens
	// between our calls to Mtime() and DeleteBlob().
	props, err := v.container.GetBlobProperties(loc)
	// Blocks newer than BlobSignatureTTL must not be trashed (clients
	// may still hold valid signatures for them).
	if t, err := v.Mtime(loc); err != nil {
	} else if time.Since(t) < theConfig.BlobSignatureTTL.Duration() {
	// If TrashLifetime == 0, just delete it
	if theConfig.TrashLifetime == 0 {
		return v.container.DeleteBlob(loc, &storage.DeleteBlobOptions{
	// Otherwise, mark as trash
	return v.container.SetBlobMetadata(loc, storage.BlobMetadata{
		"expires_at": fmt.Sprintf("%d", time.Now().Add(theConfig.TrashLifetime.Duration()).Unix()),
	}, &storage.SetBlobMetadataOptions{
584 // Untrash a Keep block.
585 // Delete the expires_at metadata attribute
586 func (v *AzureBlobVolume) Untrash(loc string) error {
587 // if expires_at does not exist, return NotFoundError
588 metadata, err := v.container.GetBlobMetadata(loc)
590 return v.translateError(err)
592 if metadata["expires_at"] == "" {
593 return os.ErrNotExist
596 // reset expires_at metadata attribute
597 metadata["expires_at"] = ""
598 err = v.container.SetBlobMetadata(loc, metadata, nil)
599 return v.translateError(err)
// Status returns a VolumeStatus struct with placeholder data.
func (v *AzureBlobVolume) Status() *VolumeStatus {
	return &VolumeStatus{
		// Azure doesn't report free space; advertise a fixed
		// placeholder so the status API has something to show.
		BytesFree: BlockSize * 1000,
611 // String returns a volume label, including the container name.
612 func (v *AzureBlobVolume) String() string {
613 return fmt.Sprintf("azure-storage-container:%+q", v.ContainerName)
// Writable returns true, unless the -readonly flag was on when the
// volume was added.
func (v *AzureBlobVolume) Writable() bool {
// Replication returns the replication level of the container, as
// specified by the -azure-storage-replication argument.
func (v *AzureBlobVolume) Replication() int {
	return v.AzureReplication
// GetStorageClasses implements Volume
// It returns the storage classes configured for this volume.
func (v *AzureBlobVolume) GetStorageClasses() []string {
	return v.StorageClasses
// If possible, translate an Azure SDK error to a recognizable error
// like os.ErrNotExist.
// The SDK does not expose typed errors for these cases, so we match
// on the error message text.
func (v *AzureBlobVolume) translateError(err error) error {
	case strings.Contains(err.Error(), "StatusCode=503"):
		// "storage: service returned error: StatusCode=503, ErrorCode=ServerBusy, ErrorMessage=The server is busy" (See #14804)
		return VolumeBusyError
	case strings.Contains(err.Error(), "Not Found"):
		// "storage: service returned without a response body (404 Not Found)"
		return os.ErrNotExist
// keepBlockRegexp matches the 32-hex-digit md5 portion of a Keep
// block locator.
var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)

// isKeepBlock reports whether s looks like a Keep block locator hash.
func (v *AzureBlobVolume) isKeepBlock(s string) bool {
	return keepBlockRegexp.MatchString(s)
// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
// and deletes them from the volume.
func (v *AzureBlobVolume) EmptyTrash() {
	var bytesDeleted, bytesInTrash int64
	var blocksDeleted, blocksInTrash int64

	// doBlob inspects one blob and deletes it if its trash period has
	// expired. Counters are updated atomically because doBlob runs
	// concurrently on multiple worker goroutines.
	doBlob := func(b storage.Blob) {
		// Check whether the block is flagged as trash
		if b.Metadata["expires_at"] == "" {
		atomic.AddInt64(&blocksInTrash, 1)
		atomic.AddInt64(&bytesInTrash, b.Properties.ContentLength)
		expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64)
			log.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err)
		// Still within its trash lifetime; leave it alone.
		if expiresAt > time.Now().Unix() {
		// If-Match guards against deleting a blob that was
		// modified (e.g., untrashed) after we listed it.
		err = v.container.DeleteBlob(b.Name, &storage.DeleteBlobOptions{
			IfMatch: b.Properties.Etag,
			log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
		atomic.AddInt64(&blocksDeleted, 1)
		atomic.AddInt64(&bytesDeleted, b.Properties.ContentLength)

	// Fan blobs out to EmptyTrashWorkers workers (at least one).
	var wg sync.WaitGroup
	todo := make(chan storage.Blob, theConfig.EmptyTrashWorkers)
	for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
		for b := range todo {

	// Page through all blobs, feeding them to the workers.
	params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
	for page := 1; ; page++ {
		resp, err := v.listBlobs(page, params)
			log.Printf("EmptyTrash: ListBlobs: %v", err)
		for _, b := range resp.Blobs {
		if resp.NextMarker == "" {
		params.Marker = resp.NextMarker

	log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
// InternalStats returns bucket I/O and API call counters.
func (v *AzureBlobVolume) InternalStats() interface{} {
	return &v.container.stats
// azureBlobStats counts the Azure API calls made by this volume,
// by operation type.
type azureBlobStats struct {
	GetMetadataOps uint64
	GetPropertiesOps uint64
	SetMetadataOps uint64

// TickErr records err in the error counters, categorized by Go type
// and, for Azure service errors, by HTTP status code and error code.
func (s *azureBlobStats) TickErr(err error) {
	errType := fmt.Sprintf("%T", err)
	if err, ok := err.(storage.AzureStorageServiceError); ok {
		errType = errType + fmt.Sprintf(" %d (%s)", err.StatusCode, err.Code)
	log.Printf("errType %T, err %s", err, err)
	s.statsTicker.TickErr(err, errType)
// azureContainer wraps storage.Container in order to count I/O and
// API usage stats for each operation.
type azureContainer struct {
	ctr *storage.Container
// Exists reports whether the container exists, counting the API call
// in the stats.
func (c *azureContainer) Exists() (bool, error) {
	c.stats.TickOps("exists")
	c.stats.Tick(&c.stats.Ops)
	ok, err := c.ctr.Exists()
// GetBlobMetadata fetches the metadata for the named blob, counting
// the API call in the stats.
func (c *azureContainer) GetBlobMetadata(bname string) (storage.BlobMetadata, error) {
	c.stats.TickOps("get_metadata")
	c.stats.Tick(&c.stats.Ops, &c.stats.GetMetadataOps)
	b := c.ctr.GetBlobReference(bname)
	// GetMetadata populates b.Metadata in place.
	err := b.GetMetadata(nil)
	return b.Metadata, err
// GetBlobProperties fetches the properties of the named blob,
// counting the API call in the stats.
func (c *azureContainer) GetBlobProperties(bname string) (*storage.BlobProperties, error) {
	c.stats.TickOps("get_properties")
	c.stats.Tick(&c.stats.Ops, &c.stats.GetPropertiesOps)
	b := c.ctr.GetBlobReference(bname)
	// GetProperties populates b.Properties in place.
	err := b.GetProperties(nil)
	return &b.Properties, err
// GetBlob opens the named blob for reading; bytes read through the
// returned reader are counted in the stats.
func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
	c.stats.TickOps("get")
	c.stats.Tick(&c.stats.Ops, &c.stats.GetOps)
	b := c.ctr.GetBlobReference(bname)
	rdr, err := b.Get(nil)
	return NewCountingReader(rdr, c.stats.TickInBytes), err
// GetBlobRange opens bytes [start, end] (inclusive) of the named blob
// for reading; bytes read are counted in the stats.
func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
	c.stats.TickOps("get_range")
	c.stats.Tick(&c.stats.Ops, &c.stats.GetRangeOps)
	b := c.ctr.GetBlobReference(bname)
	rdr, err := b.GetRange(&storage.GetBlobRangeOptions{
		Range: &storage.BlobRange{
			Start: uint64(start),
		GetBlobOptions: opts,
	return NewCountingReader(rdr, c.stats.TickInBytes), err
// If we give it an io.Reader that doesn't also have a Len() int
// method, the Azure SDK determines data size by copying the data into
// a new buffer, which is not a good use of memory.
// readerWithAzureLen pairs a reader with a known length to avoid that
// copy.
type readerWithAzureLen struct {

// Len satisfies the private lener interface in azure-sdk-for-go.
func (r *readerWithAzureLen) Len() int {
// CreateBlockBlobFromReader uploads size bytes from rdr as the named
// block blob, counting the API call and bytes written in the stats.
func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr io.Reader, opts *storage.PutBlobOptions) error {
	c.stats.TickOps("create")
	c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
	// Wrap rdr so the SDK can learn its length without buffering the
	// whole payload (see readerWithAzureLen).
	rdr = &readerWithAzureLen{
		Reader: NewCountingReader(rdr, c.stats.TickOutBytes),
	b := c.ctr.GetBlobReference(bname)
	err := b.CreateBlockBlobFromReader(rdr, opts)
// SetBlobMetadata replaces the named blob's metadata with m, counting
// the API call in the stats.
func (c *azureContainer) SetBlobMetadata(bname string, m storage.BlobMetadata, opts *storage.SetBlobMetadataOptions) error {
	c.stats.TickOps("set_metadata")
	c.stats.Tick(&c.stats.Ops, &c.stats.SetMetadataOps)
	b := c.ctr.GetBlobReference(bname)
	// NOTE(review): the assignment of m to b.Metadata is not visible
	// in this excerpt.
	err := b.SetMetadata(opts)
// ListBlobs lists blobs in the container, counting the API call in
// the stats.
func (c *azureContainer) ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
	c.stats.TickOps("list")
	c.stats.Tick(&c.stats.Ops, &c.stats.ListOps)
	resp, err := c.ctr.ListBlobs(params)
// DeleteBlob deletes the named blob, counting the API call in the
// stats.
func (c *azureContainer) DeleteBlob(bname string, opts *storage.DeleteBlobOptions) error {
	c.stats.TickOps("delete")
	c.stats.Tick(&c.stats.Ops, &c.stats.DelOps)
	b := c.ctr.GetBlobReference(bname)
	err := b.Delete(opts)