// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

package main

import (
	"bytes"
	"context"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"git.curoverse.com/arvados.git/sdk/go/arvados"
	"github.com/Azure/azure-sdk-for-go/storage"
)

const azureDefaultRequestTimeout = arvados.Duration(10 * time.Minute)

var (
	azureMaxGetBytes           int
	azureStorageAccountName    string
	azureStorageAccountKeyFile string
	azureStorageReplication    int
	azureWriteRaceInterval     = 15 * time.Second
	azureWriteRacePollTime     = time.Second
)

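// readKeyFromFile returns the Azure storage account key read from
// the given file, with surrounding whitespace trimmed.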
func readKeyFromFile(file string) (string, error) {
	buf, err := ioutil.ReadFile(file)
	if err != nil {
		return "", errors.New("reading key from " + file + ": " + err.Error())
	}
	accountKey := strings.TrimSpace(string(buf))
	if accountKey == "" {
		return "", errors.New("empty account key in " + file)
	}
	return accountKey, nil
}

type azureVolumeAdder struct {
	*Config
}

// String implements flag.Value
func (s *azureVolumeAdder) String() string {
	return "-"
}

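// Set adds an AzureBlobVolume for the given container name, using
// the account name, key file, replication level, and read-only flag
// given by the other command line arguments.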
func (s *azureVolumeAdder) Set(containerName string) error {
	s.Config.Volumes = append(s.Config.Volumes, &AzureBlobVolume{
		ContainerName:         containerName,
		StorageAccountName:    azureStorageAccountName,
		StorageAccountKeyFile: azureStorageAccountKeyFile,
		AzureReplication:      azureStorageReplication,
		ReadOnly:              deprecated.flagReadonly,
	})
	return nil
}

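// init registers the Azure volume type and the -azure-* command line
// flags used to configure Azure blob storage volumes.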
func init() {
	VolumeTypes = append(VolumeTypes, func() VolumeWithExamples { return &AzureBlobVolume{} })

	flag.Var(&azureVolumeAdder{theConfig},
		"azure-storage-container-volume",
		"Use the given container as a storage volume. Can be given multiple times.")
	flag.StringVar(
		&azureStorageAccountName,
		"azure-storage-account-name",
		"",
		"Azure storage account name used for subsequent --azure-storage-container-volume arguments.")
	flag.StringVar(
		&azureStorageAccountKeyFile,
		"azure-storage-account-key-file",
		"",
		"`File` containing the account key used for subsequent --azure-storage-container-volume arguments.")
	flag.IntVar(
		&azureStorageReplication,
		"azure-storage-replication",
		3,
		"Replication level to report to clients when data is stored in an Azure container.")
	flag.IntVar(
		&azureMaxGetBytes,
		"azure-max-get-bytes",
		BlockSize,
		fmt.Sprintf("Maximum bytes to request in a single GET request. If smaller than %d, use multiple concurrent range requests to retrieve a block.", BlockSize))
}

// An AzureBlobVolume stores and retrieves blocks in an Azure Blob
// container.
type AzureBlobVolume struct {
	StorageAccountName    string
	StorageAccountKeyFile string
	StorageBaseURL        string // "" means default, "core.windows.net"
	ContainerName         string
	AzureReplication      int
	ReadOnly              bool
	RequestTimeout        arvados.Duration
	StorageClasses        []string

	azClient  storage.Client
	container *azureContainer
}

// singleSender is a single-attempt storage.Sender.
type singleSender struct{}

// Send performs req exactly once.
func (*singleSender) Send(c *storage.Client, req *http.Request) (resp *http.Response, err error) {
	return c.HTTPClient.Do(req)
}

// Examples implements VolumeWithExamples.
func (*AzureBlobVolume) Examples() []Volume {
	return []Volume{
		&AzureBlobVolume{
			StorageAccountName:    "example-account-name",
			StorageAccountKeyFile: "/etc/azure_storage_account_key.txt",
			ContainerName:         "example-container-name",
			AzureReplication:      3,
			RequestTimeout:        azureDefaultRequestTimeout,
		},
		&AzureBlobVolume{
			StorageAccountName:    "cn-account-name",
			StorageAccountKeyFile: "/etc/azure_cn_storage_account_key.txt",
			StorageBaseURL:        "core.chinacloudapi.cn",
			ContainerName:         "cn-container-name",
			AzureReplication:      3,
			RequestTimeout:        azureDefaultRequestTimeout,
		},
	}
}

// Type implements Volume.
func (v *AzureBlobVolume) Type() string {
	return "Azure"
}

// Start implements Volume.
func (v *AzureBlobVolume) Start() error {
	if v.ContainerName == "" {
		return errors.New("no container name given")
	}
	if v.StorageAccountName == "" || v.StorageAccountKeyFile == "" {
		return errors.New("StorageAccountName and StorageAccountKeyFile must be given")
	}
	accountKey, err := readKeyFromFile(v.StorageAccountKeyFile)
	if err != nil {
		return err
	}
	if v.StorageBaseURL == "" {
		v.StorageBaseURL = storage.DefaultBaseURL
	}
	v.azClient, err = storage.NewClient(v.StorageAccountName, accountKey, v.StorageBaseURL, storage.DefaultAPIVersion, true)
	if err != nil {
		return fmt.Errorf("creating Azure storage client: %s", err)
	}
	v.azClient.Sender = &singleSender{}

	if v.RequestTimeout == 0 {
		v.RequestTimeout = azureDefaultRequestTimeout
	}
	v.azClient.HTTPClient = &http.Client{
		Timeout: time.Duration(v.RequestTimeout),
	}
	bs := v.azClient.GetBlobService()
	v.container = &azureContainer{
		ctr: bs.GetContainerReference(v.ContainerName),
	}

	if ok, err := v.container.Exists(); err != nil {
		return err
	} else if !ok {
		return fmt.Errorf("Azure container %q does not exist", v.ContainerName)
	}
	return nil
}

// DeviceID returns a globally unique ID for the storage container.
func (v *AzureBlobVolume) DeviceID() string {
	return "azure://" + v.StorageBaseURL + "/" + v.StorageAccountName + "/" + v.ContainerName
}

// checkTrashed returns true if the expires_at metadata attribute is
// found on the block, i.e., the block has been trashed.
func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
	metadata, err := v.container.GetBlobMetadata(loc)
	if err != nil {
		return false, metadata, v.translateError(err)
	}
	if metadata["expires_at"] != "" {
		return true, metadata, nil
	}
	return false, metadata, nil
}

// Get reads a Keep block that has been stored as a block blob in the
// container.
//
// If the block is younger than azureWriteRaceInterval and is
// unexpectedly empty, assume a PutBlob operation is in progress, and
// wait for it to finish writing.
func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
	trashed, _, err := v.checkTrashed(loc)
	if err != nil {
		return 0, err
	}
	if trashed {
		return 0, os.ErrNotExist
	}
	var deadline time.Time
	haveDeadline := false
	size, err := v.get(ctx, loc, buf)
	for err == nil && size == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" {
		// Seeing a brand new empty block probably means we're
		// in a race with CreateBlob, which under the hood
		// (apparently) does "CreateEmpty" and "CommitData"
		// with no additional transaction locking.
		if !haveDeadline {
			t, err := v.Mtime(loc)
			if err != nil {
				log.Print("Got empty block (possible race) but Mtime failed: ", err)
				break
			}
			deadline = t.Add(azureWriteRaceInterval)
			if time.Now().After(deadline) {
				break
			}
			log.Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", loc, time.Since(t), deadline)
			haveDeadline = true
		} else if time.Now().After(deadline) {
			break
		}
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-time.After(azureWriteRacePollTime):
		}
		size, err = v.get(ctx, loc, buf)
	}
	if haveDeadline {
		log.Printf("Race ended with size==%d", size)
	}
	return size, err
}

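// get retrieves the block identified by loc into buf. If
// azureMaxGetBytes is smaller than BlockSize, the block is fetched
// with multiple concurrent range requests; otherwise a single
// GetBlob call is used.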
func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	expectSize := len(buf)
	if azureMaxGetBytes < BlockSize {
		// Unfortunately the handler doesn't tell us how long the blob
		// is expected to be, so we have to ask Azure.
		props, err := v.container.GetBlobProperties(loc)
		if err != nil {
			return 0, v.translateError(err)
		}
		if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
			return 0, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize)
		}
		expectSize = int(props.ContentLength)
	}

	if expectSize == 0 {
		return 0, nil
	}

	// We'll update this actualSize if/when we get the last piece.
	actualSize := -1
	pieces := (expectSize + azureMaxGetBytes - 1) / azureMaxGetBytes
	errors := make(chan error, pieces)
	var wg sync.WaitGroup
	wg.Add(pieces)
	for p := 0; p < pieces; p++ {
		// Each goroutine retrieves one piece. If we hit an
		// error, it is sent to the errors chan so get() can
		// return it -- but only if the error happens before
		// ctx is done. This way, if ctx is done before we hit
		// any other error (e.g., requesting client has hung
		// up), we return the original ctx.Err() instead of
		// the secondary errors from the transfers that got
		// interrupted as a result.
		go func(p int) {
			defer wg.Done()
			startPos := p * azureMaxGetBytes
			endPos := startPos + azureMaxGetBytes
			if endPos > expectSize {
				endPos = expectSize
			}
			var rdr io.ReadCloser
			var err error
			gotRdr := make(chan struct{})
			go func() {
				defer close(gotRdr)
				if startPos == 0 && endPos == expectSize {
					rdr, err = v.container.GetBlob(loc)
				} else {
					rdr, err = v.container.GetBlobRange(loc, startPos, endPos-1, nil)
				}
			}()
			select {
			case <-ctx.Done():
				go func() {
					<-gotRdr
					if err == nil {
						rdr.Close()
					}
				}()
				return
			case <-gotRdr:
			}
			if err != nil {
				errors <- err
				cancel()
				return
			}
			go func() {
				// Close the reader when the client
				// hangs up or another piece fails
				// (possibly interrupting ReadFull())
				// or when all pieces succeed and
				// get() returns.
				<-ctx.Done()
				rdr.Close()
			}()
			n, err := io.ReadFull(rdr, buf[startPos:endPos])
			if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
				// If we don't know the actual size,
				// and just tried reading 64 MiB, it's
				// normal to encounter EOF.
			} else if err != nil {
				if ctx.Err() == nil {
					errors <- err
				}
				cancel()
				return
			}
			if p == pieces-1 {
				actualSize = startPos + n
			}
		}(p)
	}
	wg.Wait()
	close(errors)
	if len(errors) > 0 {
		return 0, v.translateError(<-errors)
	}
	if ctx.Err() != nil {
		return 0, ctx.Err()
	}
	return actualSize, nil
}

// Compare the given data with existing stored data.
func (v *AzureBlobVolume) Compare(ctx context.Context, loc string, expect []byte) error {
	trashed, _, err := v.checkTrashed(loc)
	if err != nil {
		return err
	}
	if trashed {
		return os.ErrNotExist
	}
	var rdr io.ReadCloser
	gotRdr := make(chan struct{})
	go func() {
		defer close(gotRdr)
		rdr, err = v.container.GetBlob(loc)
	}()
	select {
	case <-ctx.Done():
		go func() {
			<-gotRdr
			if err == nil {
				rdr.Close()
			}
		}()
		return ctx.Err()
	case <-gotRdr:
	}
	if err != nil {
		return v.translateError(err)
	}
	defer rdr.Close()
	return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
}

// Put stores a Keep block as a block blob in the container.
func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) error {
	if v.ReadOnly {
		return MethodDisabledError
	}
	// Send the block data through a pipe, so that (if we need to)
	// we can close the pipe early and abandon our
	// CreateBlockBlobFromReader() goroutine, without worrying
	// about CreateBlockBlobFromReader() accessing our block
	// buffer after we release it.
	bufr, bufw := io.Pipe()
	go func() {
		io.Copy(bufw, bytes.NewReader(block))
		bufw.Close()
	}()
	errChan := make(chan error)
	go func() {
		var body io.Reader = bufr
		if len(block) == 0 {
			// We must send a "Content-Length: 0" header,
			// but the http client interprets
			// ContentLength==0 as "unknown" unless it can
			// confirm by introspection that Body will
			// read 0 bytes.
			body = http.NoBody
			bufr.Close()
		}
		errChan <- v.container.CreateBlockBlobFromReader(loc, len(block), body, nil)
	}()
	select {
	case <-ctx.Done():
		theConfig.debugLogf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
		// Our pipe might be stuck in Write(), waiting for
		// io.Copy() to read. If so, un-stick it. This means
		// CreateBlockBlobFromReader will get corrupt data,
		// but that's OK: the size won't match, so the write
		// will fail.
		go io.Copy(ioutil.Discard, bufr)
		// CloseWithError() will return once pending I/O is done.
		bufw.CloseWithError(ctx.Err())
		theConfig.debugLogf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
		return ctx.Err()
	case err := <-errChan:
		return err
	}
}

// Touch updates the last-modified property of a block blob.
func (v *AzureBlobVolume) Touch(loc string) error {
	if v.ReadOnly {
		return MethodDisabledError
	}
	trashed, metadata, err := v.checkTrashed(loc)
	if err != nil {
		return err
	}
	if trashed {
		return os.ErrNotExist
	}

	metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
	return v.container.SetBlobMetadata(loc, metadata, nil)
}

// Mtime returns the last-modified property of a block blob.
func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
	trashed, _, err := v.checkTrashed(loc)
	if err != nil {
		return time.Time{}, err
	}
	if trashed {
		return time.Time{}, os.ErrNotExist
	}

	props, err := v.container.GetBlobProperties(loc)
	if err != nil {
		return time.Time{}, err
	}
	return time.Time(props.LastModified), nil
}

// IndexTo writes a list of Keep blocks that are stored in the
// container.
func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
	params := storage.ListBlobsParameters{
		Prefix:  prefix,
		Include: &storage.IncludeBlobDataset{Metadata: true},
	}
	for {
		resp, err := v.container.ListBlobs(params)
		if err != nil {
			return err
		}
		for _, b := range resp.Blobs {
			if !v.isKeepBlock(b.Name) {
				continue
			}
			modtime := time.Time(b.Properties.LastModified)
			if b.Properties.ContentLength == 0 && modtime.Add(azureWriteRaceInterval).After(time.Now()) {
				// A new zero-length blob is probably
				// just a new non-empty blob that
				// hasn't committed its data yet (see
				// Get()), and in any case has no
				// value.
				continue
			}
			if b.Metadata["expires_at"] != "" {
				// Trashed blob; exclude it from response
				continue
			}
			fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, modtime.UnixNano())
		}
		if resp.NextMarker == "" {
			return nil
		}
		params.Marker = resp.NextMarker
	}
}

// Trash a Keep block.
func (v *AzureBlobVolume) Trash(loc string) error {
	if v.ReadOnly {
		return MethodDisabledError
	}

	// Ideally we would use If-Unmodified-Since, but that
	// particular condition seems to be ignored by Azure. Instead,
	// we get the Etag before checking Mtime, and use If-Match to
	// ensure we don't delete data if Put() or Touch() happens
	// between our calls to Mtime() and DeleteBlob().
	props, err := v.container.GetBlobProperties(loc)
	if err != nil {
		return err
	}
	if t, err := v.Mtime(loc); err != nil {
		return err
	} else if time.Since(t) < theConfig.BlobSignatureTTL.Duration() {
		return nil
	}

	// If TrashLifetime == 0, just delete it
	if theConfig.TrashLifetime == 0 {
		return v.container.DeleteBlob(loc, &storage.DeleteBlobOptions{
			IfMatch: props.Etag,
		})
	}

	// Otherwise, mark as trash
	return v.container.SetBlobMetadata(loc, storage.BlobMetadata{
		"expires_at": fmt.Sprintf("%d", time.Now().Add(theConfig.TrashLifetime.Duration()).Unix()),
	}, &storage.SetBlobMetadataOptions{
		IfMatch: props.Etag,
	})
}

// Untrash a Keep block.
// Delete the expires_at metadata attribute.
func (v *AzureBlobVolume) Untrash(loc string) error {
	// if expires_at does not exist, return NotFoundError
	metadata, err := v.container.GetBlobMetadata(loc)
	if err != nil {
		return v.translateError(err)
	}
	if metadata["expires_at"] == "" {
		return os.ErrNotExist
	}

	// reset expires_at metadata attribute
	metadata["expires_at"] = ""
	err = v.container.SetBlobMetadata(loc, metadata, nil)
	return v.translateError(err)
}

// Status returns a VolumeStatus struct with placeholder data.
func (v *AzureBlobVolume) Status() *VolumeStatus {
	return &VolumeStatus{
		DeviceNum: 1,
		BytesFree: BlockSize * 1000,
		BytesUsed: 1,
	}
}

// String returns a volume label, including the container name.
func (v *AzureBlobVolume) String() string {
	return fmt.Sprintf("azure-storage-container:%+q", v.ContainerName)
}

// Writable returns true, unless the -readonly flag was on when the
// volume was added.
func (v *AzureBlobVolume) Writable() bool {
	return !v.ReadOnly
}

// Replication returns the replication level of the container, as
// specified by the -azure-storage-replication argument.
func (v *AzureBlobVolume) Replication() int {
	return v.AzureReplication
}

// GetStorageClasses implements Volume
func (v *AzureBlobVolume) GetStorageClasses() []string {
	return v.StorageClasses
}

// If possible, translate an Azure SDK error to a recognizable error
// like os.ErrNotExist.
func (v *AzureBlobVolume) translateError(err error) error {
	switch {
	case err == nil:
		return err
	case strings.Contains(err.Error(), "Not Found"):
		// "storage: service returned without a response body (404 Not Found)"
		return os.ErrNotExist
	default:
		return err
	}
}

var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)

func (v *AzureBlobVolume) isKeepBlock(s string) bool {
	return keepBlockRegexp.MatchString(s)
}

// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
// and deletes them from the volume.
func (v *AzureBlobVolume) EmptyTrash() {
	var bytesDeleted, bytesInTrash int64
	var blocksDeleted, blocksInTrash int64

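	// doBlob counts one trashed blob and, if its expires_at time
	// has passed, deletes it.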
	doBlob := func(b storage.Blob) {
		// Check whether the block is flagged as trash
		if b.Metadata["expires_at"] == "" {
			return
		}

		atomic.AddInt64(&blocksInTrash, 1)
		atomic.AddInt64(&bytesInTrash, b.Properties.ContentLength)

		expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64)
		if err != nil {
			log.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err)
			return
		}

		if expiresAt > time.Now().Unix() {
			return
		}

		err = v.container.DeleteBlob(b.Name, &storage.DeleteBlobOptions{
			IfMatch: b.Properties.Etag,
		})
		if err != nil {
			log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
			return
		}
		atomic.AddInt64(&blocksDeleted, 1)
		atomic.AddInt64(&bytesDeleted, b.Properties.ContentLength)
	}

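	// Process trashed blobs concurrently, using up to
	// EmptyTrashWorkers goroutines (and at least one).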
	var wg sync.WaitGroup
	todo := make(chan storage.Blob, theConfig.EmptyTrashWorkers)
	for i := 0; i < 1 || i < theConfig.EmptyTrashWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for b := range todo {
				doBlob(b)
			}
		}()
	}

	params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
	for {
		resp, err := v.container.ListBlobs(params)
		if err != nil {
			log.Printf("EmptyTrash: ListBlobs: %v", err)
			break
		}
		for _, b := range resp.Blobs {
			todo <- b
		}
		if resp.NextMarker == "" {
			break
		}
		params.Marker = resp.NextMarker
	}
	close(todo)
	wg.Wait()

	log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}

// InternalStats returns bucket I/O and API call counters.
func (v *AzureBlobVolume) InternalStats() interface{} {
	return &v.container.stats
}

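// azureBlobStats counts API calls made to the Azure container,
// grouped by operation type, along with any resulting errors.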
type azureBlobStats struct {
	statsTicker
	Ops              uint64
	GetOps           uint64
	GetRangeOps      uint64
	GetMetadataOps   uint64
	GetPropertiesOps uint64
	CreateOps        uint64
	SetMetadataOps   uint64
	DelOps           uint64
	ListOps          uint64
}

func (s *azureBlobStats) TickErr(err error) {
	if err == nil {
		return
	}
	errType := fmt.Sprintf("%T", err)
	if err, ok := err.(storage.AzureStorageServiceError); ok {
		errType = errType + fmt.Sprintf(" %d (%s)", err.StatusCode, err.Code)
	}
	log.Printf("errType %T, err %s", err, err)
	s.statsTicker.TickErr(err, errType)
}

// azureContainer wraps storage.Container in order to count I/O and
// API usage stats.
type azureContainer struct {
	ctr   *storage.Container
	stats azureBlobStats
}

func (c *azureContainer) Exists() (bool, error) {
	c.stats.Tick(&c.stats.Ops)
	ok, err := c.ctr.Exists()
	c.stats.TickErr(err)
	return ok, err
}

func (c *azureContainer) GetBlobMetadata(bname string) (storage.BlobMetadata, error) {
	c.stats.Tick(&c.stats.Ops, &c.stats.GetMetadataOps)
	b := c.ctr.GetBlobReference(bname)
	err := b.GetMetadata(nil)
	c.stats.TickErr(err)
	return b.Metadata, err
}

func (c *azureContainer) GetBlobProperties(bname string) (*storage.BlobProperties, error) {
	c.stats.Tick(&c.stats.Ops, &c.stats.GetPropertiesOps)
	b := c.ctr.GetBlobReference(bname)
	err := b.GetProperties(nil)
	c.stats.TickErr(err)
	return &b.Properties, err
}

func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
	c.stats.Tick(&c.stats.Ops, &c.stats.GetOps)
	b := c.ctr.GetBlobReference(bname)
	rdr, err := b.Get(nil)
	c.stats.TickErr(err)
	return NewCountingReader(rdr, c.stats.TickInBytes), err
}

func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
	c.stats.Tick(&c.stats.Ops, &c.stats.GetRangeOps)
	b := c.ctr.GetBlobReference(bname)
	rdr, err := b.GetRange(&storage.GetBlobRangeOptions{
		Range: &storage.BlobRange{
			Start: uint64(start),
			End:   uint64(end),
		},
		GetBlobOptions: opts,
	})
	c.stats.TickErr(err)
	return NewCountingReader(rdr, c.stats.TickInBytes), err
}

// If we give it an io.Reader that doesn't also have a Len() int
// method, the Azure SDK determines data size by copying the data into
// a new buffer, which is not a good use of memory.
type readerWithAzureLen struct {
	io.Reader
	len int
}

// Len satisfies the private lener interface in azure-sdk-for-go.
func (r *readerWithAzureLen) Len() int {
	return r.len
}

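// CreateBlockBlobFromReader writes a new blob, counting the
// operation and the outgoing bytes. Non-empty readers are wrapped in
// readerWithAzureLen so the SDK can learn the data size without
// copying it into a temporary buffer.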
func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr io.Reader, opts *storage.PutBlobOptions) error {
	c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
	if size != 0 {
		rdr = &readerWithAzureLen{
			Reader: NewCountingReader(rdr, c.stats.TickOutBytes),
			len:    size,
		}
	}
	b := c.ctr.GetBlobReference(bname)
	err := b.CreateBlockBlobFromReader(rdr, opts)
	c.stats.TickErr(err)
	return err
}

func (c *azureContainer) SetBlobMetadata(bname string, m storage.BlobMetadata, opts *storage.SetBlobMetadataOptions) error {
	c.stats.Tick(&c.stats.Ops, &c.stats.SetMetadataOps)
	b := c.ctr.GetBlobReference(bname)
	b.Metadata = m
	err := b.SetMetadata(opts)
	c.stats.TickErr(err)
	return err
}

func (c *azureContainer) ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
	c.stats.Tick(&c.stats.Ops, &c.stats.ListOps)
	resp, err := c.ctr.ListBlobs(params)
	c.stats.TickErr(err)
	return resp, err
}

func (c *azureContainer) DeleteBlob(bname string, opts *storage.DeleteBlobOptions) error {
	c.stats.Tick(&c.stats.Ops, &c.stats.DelOps)
	b := c.ctr.GetBlobReference(bname)
	err := b.Delete(opts)
	c.stats.TickErr(err)
	return err
}