1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
22 "git.arvados.org/arvados.git/sdk/go/arvados"
23 "git.arvados.org/arvados.git/sdk/go/ctxlog"
24 "github.com/Azure/azure-sdk-for-go/storage"
25 "github.com/prometheus/client_golang/prometheus"
26 "github.com/sirupsen/logrus"
// Register this driver under the name used in cluster config
// (Volumes.*.Driver: "Azure"), mapping it to its constructor.
// NOTE(review): the enclosing function declaration (presumably init())
// is on a line elided from this excerpt.
30 driver["Azure"] = newAzureBlobVolume
// newAzureBlobVolume is the volume constructor registered under "Azure".
// It decodes DriverParameters into an AzureBlobVolume (starting from
// defaults), validates required fields, builds the Azure storage client,
// and verifies that the configured container exists.
// NOTE(review): interleaved lines (error checks, closing braces) are
// elided from this excerpt; comments describe only the visible code.
33 func newAzureBlobVolume(params newVolumeParams) (volume, error) {
// Defaults first; json.Unmarshal below overwrites any field that is
// present in DriverParameters.
34 v := &AzureBlobVolume{
35 RequestTimeout: azureDefaultRequestTimeout,
36 WriteRaceInterval: azureDefaultWriteRaceInterval,
37 WriteRacePollTime: azureDefaultWriteRacePollTime,
38 cluster: params.Cluster,
39 volume: params.ConfigVolume,
40 logger: params.Logger,
41 metrics: params.MetricsVecs,
42 bufferPool: params.BufferPool,
44 err := json.Unmarshal(params.ConfigVolume.DriverParameters, &v)
// Zero-valued tunables fall back to package defaults.
48 if v.ListBlobsRetryDelay == 0 {
49 v.ListBlobsRetryDelay = azureDefaultListBlobsRetryDelay
51 if v.ListBlobsMaxAttempts == 0 {
52 v.ListBlobsMaxAttempts = azureDefaultListBlobsMaxAttempts
54 if v.StorageBaseURL == "" {
55 v.StorageBaseURL = storage.DefaultBaseURL
// These three have no sensible default; refuse to start without them.
57 if v.ContainerName == "" || v.StorageAccountName == "" || v.StorageAccountKey == "" {
58 return nil, errors.New("DriverParameters: ContainerName, StorageAccountName, and StorageAccountKey must be provided")
60 azc, err := storage.NewClient(v.StorageAccountName, v.StorageAccountKey, v.StorageBaseURL, storage.DefaultAPIVersion, true)
62 return nil, fmt.Errorf("creating Azure storage client: %s", err)
// Disable the SDK's built-in retries (singleSender sends exactly once)
// and enforce our own overall request timeout.
65 v.azClient.Sender = &singleSender{}
66 v.azClient.HTTPClient = &http.Client{
67 Timeout: time.Duration(v.RequestTimeout),
69 bs := v.azClient.GetBlobService()
70 v.container = &azureContainer{
71 ctr: bs.GetContainerReference(v.ContainerName),
// Fail fast if the container is missing or unreachable.
74 if ok, err := v.container.Exists(); err != nil {
77 return nil, fmt.Errorf("Azure container %q does not exist: %s", v.ContainerName, err)
// check wires this volume's prometheus counter vectors (labeled by
// device ID) into the container's stats struct so every wrapped API
// call is counted.
// NOTE(review): the tail of this method (its return) is on lines
// elided from this excerpt.
82 func (v *AzureBlobVolume) check() error {
83 lbls := prometheus.Labels{"device_id": v.DeviceID()}
84 v.container.stats.opsCounters, v.container.stats.errCounters, v.container.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
// Defaults applied by newAzureBlobVolume when the corresponding
// DriverParameters fields are zero/empty.
// NOTE(review): the surrounding const(...) declaration lines are
// elided from this excerpt.
89 azureDefaultRequestTimeout = arvados.Duration(10 * time.Minute)
90 azureDefaultListBlobsMaxAttempts = 12
91 azureDefaultListBlobsRetryDelay = arvados.Duration(10 * time.Second)
92 azureDefaultWriteRaceInterval = arvados.Duration(15 * time.Second)
93 azureDefaultWriteRacePollTime = arvados.Duration(time.Second)
96 // An AzureBlobVolume stores and retrieves blocks in an Azure Blob
98 type AzureBlobVolume struct {
// Account credentials and endpoint, set via DriverParameters.
99 StorageAccountName string
100 StorageAccountKey string
101 StorageBaseURL string // "" means default, "core.windows.net"
// HTTP timeout for each Azure request, and ListBlobs retry policy
// (see listBlobs).
103 RequestTimeout arvados.Duration
104 ListBlobsRetryDelay arvados.Duration
105 ListBlobsMaxAttempts int
// Tuning for the empty-blob write race handled in BlockRead.
107 WriteRaceInterval arvados.Duration
108 WriteRacePollTime arvados.Duration
// Runtime state injected by newAzureBlobVolume, not user config.
110 cluster *arvados.Cluster
111 volume arvados.Volume
112 logger logrus.FieldLogger
113 metrics *volumeMetricsVecs
114 bufferPool *bufferPool
115 azClient storage.Client
116 container *azureContainer
// NOTE(review): fields referenced by methods below (e.g.
// ContainerName, MaxGetBytes) are on lines elided from this excerpt.
119 // singleSender is a single-attempt storage.Sender.
// Installed as azClient.Sender so the Azure SDK does not retry
// requests on its own; retry policy stays under this package's control.
120 type singleSender struct{}
122 // Send performs req exactly once.
123 func (*singleSender) Send(c *storage.Client, req *http.Request) (resp *http.Response, err error) {
124 return c.HTTPClient.Do(req)
127 // DeviceID returns a globally unique ID for the storage container.
// The ID is derived from endpoint, account, and container name, so two
// keepstores pointing at the same container report the same device.
128 func (v *AzureBlobVolume) DeviceID() string {
129 return "azure://" + v.StorageBaseURL + "/" + v.StorageAccountName + "/" + v.ContainerName
132 // Return true if expires_at metadata attribute is found on the block
// checkTrashed fetches the blob's metadata and reports whether it has
// been marked as trash (non-empty "expires_at"). The metadata map is
// returned so callers can reuse it without a second round trip.
// NOTE(review): the error-check line after GetBlobMetadata is elided
// from this excerpt.
133 func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
134 metadata, err := v.container.GetBlobMetadata(loc)
136 return false, metadata, v.translateError(err)
138 if metadata["expires_at"] != "" {
139 return true, metadata, nil
141 return false, metadata, nil
144 // BlockRead reads a Keep block that has been stored as a block blob
147 // If the block is younger than azureWriteRaceInterval and is
148 // unexpectedly empty, assume a BlockWrite operation is in progress,
149 // and wait for it to finish writing.
// NOTE(review): several interior lines (error checks, loop/select
// scaffolding) are elided from this excerpt.
150 func (v *AzureBlobVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
151 trashed, _, err := v.checkTrashed(hash)
// A trashed block is reported as nonexistent.
156 return 0, os.ErrNotExist
// Borrow a scratch buffer from the pool; returned on exit.
158 buf, err := v.bufferPool.GetContext(ctx)
162 defer v.bufferPool.Put(buf)
// streamer lets v.get() write pieces at arbitrary offsets while the
// data is streamed sequentially to writeTo.
163 streamer := newStreamWriterAt(writeTo, 65536, buf)
164 defer streamer.Close()
165 var deadline time.Time
166 size, err := v.get(ctx, hash, streamer)
// "d41d8cd98f00b204e9800998ecf8427e" is the md5 of the empty string,
// i.e., the one hash for which a 0-byte blob is the expected content.
167 for err == nil && size == 0 && streamer.WroteAt() == 0 && hash != "d41d8cd98f00b204e9800998ecf8427e" {
168 // Seeing a brand new empty block probably means we're
169 // in a race with CreateBlob, which under the hood
170 // (apparently) does "CreateEmpty" and "CommitData"
171 // with no additional transaction locking.
172 if deadline.IsZero() {
// First empty read: compute a polling deadline from the blob's mtime.
173 t, err := v.Mtime(hash)
175 ctxlog.FromContext(ctx).Print("Got empty block (possible race) but Mtime failed: ", err)
178 deadline = t.Add(v.WriteRaceInterval.Duration())
179 if time.Now().After(deadline) {
182 ctxlog.FromContext(ctx).Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", hash, time.Since(t), deadline)
183 } else if time.Now().After(deadline) {
// Wait between polls (elided select presumably also honors ctx).
189 case <-time.After(v.WriteRacePollTime.Duration()):
191 size, err = v.get(ctx, hash, streamer)
193 if !deadline.IsZero() {
194 ctxlog.FromContext(ctx).Printf("Race ended with size==%d", size)
198 return streamer.Wrote(), err
// Flush/close the streamer and report how many bytes reached writeTo.
200 err = streamer.Close()
201 return streamer.Wrote(), err
// get fetches the blob for hash and writes it to dst, optionally
// splitting the transfer into multiple concurrently-fetched ranges of
// at most v.MaxGetBytes each. Returns the number of bytes in the blob.
// NOTE(review): many interior lines (goroutine scaffolding, error
// checks, channel plumbing, closing braces) are elided from this
// excerpt; comments describe only the visible code.
204 func (v *AzureBlobVolume) get(ctx context.Context, hash string, dst io.WriterAt) (int, error) {
205 ctx, cancel := context.WithCancel(ctx)
// Decide piece size: whole block in one request unless MaxGetBytes
// forces smaller ranged requests.
208 pieceSize := BlockSize
209 if v.MaxGetBytes > 0 && v.MaxGetBytes < BlockSize {
210 pieceSize = v.MaxGetBytes
214 expectSize := BlockSize
215 if pieceSize < BlockSize {
216 // Unfortunately the handler doesn't tell us how long
217 // the blob is expected to be, so we have to ask
// Ranged requests need the real size up front to compute piece count.
219 props, err := v.container.GetBlobProperties(hash)
221 return 0, v.translateError(err)
223 if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
224 return 0, fmt.Errorf("block %s invalid size %d (max %d)", hash, props.ContentLength, BlockSize)
226 expectSize = int(props.ContentLength)
// Ceiling division: last piece may be short.
227 pieces = (expectSize + pieceSize - 1) / pieceSize
234 // We'll update this actualSize if/when we get the last piece.
// Buffered so a failing piece can report without blocking.
236 errors := make(chan error, pieces)
237 var wg sync.WaitGroup
239 for p := 0; p < pieces; p++ {
240 // Each goroutine retrieves one piece. If we hit an
241 // error, it is sent to the errors chan so get() can
242 // return it -- but only if the error happens before
243 // ctx is done. This way, if ctx is done before we hit
244 // any other error (e.g., requesting client has hung
245 // up), we return the original ctx.Err() instead of
246 // the secondary errors from the transfers that got
247 // interrupted as a result.
250 startPos := p * pieceSize
251 endPos := startPos + pieceSize
252 if endPos > expectSize {
255 var rdr io.ReadCloser
257 gotRdr := make(chan struct{})
// Full-blob GET when there is only one piece covering everything;
// otherwise a ranged GET (end offset is inclusive in the Azure API,
// hence endPos-1).
260 if startPos == 0 && endPos == expectSize {
261 rdr, err = v.container.GetBlob(hash)
263 rdr, err = v.container.GetBlobRange(hash, startPos, endPos-1, nil)
283 // Close the reader when the client
284 // hangs up or another piece fails
285 // (possibly interrupting ReadFull())
286 // or when all pieces succeed and
// Copy this piece into dst at its own offset.
291 n, err := io.CopyN(io.NewOffsetWriter(dst, int64(startPos)), rdr, int64(endPos-startPos))
292 if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
293 // If we don't know the actual size,
294 // and just tried reading 64 MiB, it's
295 // normal to encounter EOF.
296 } else if err != nil {
297 if ctx.Err() == nil {
// Track the true blob size from whichever piece reads the end.
304 actualSize = startPos + int(n)
// First queued error (if any) wins, translated to a local error type.
311 return 0, v.translateError(<-errors)
313 if ctx.Err() != nil {
316 return actualSize, nil
319 // BlockWrite stores a block on the volume. If it already exists, its
320 // timestamp is updated.
// NOTE(review): interior lines (goroutine launch, empty-block special
// case, select scaffolding, error checks) are elided from this excerpt.
321 func (v *AzureBlobVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
322 // Send the block data through a pipe, so that (if we need to)
323 // we can close the pipe early and abandon our
324 // CreateBlockBlobFromReader() goroutine, without worrying
325 // about CreateBlockBlobFromReader() accessing our data
326 // buffer after we release it.
327 bufr, bufw := io.Pipe()
// Buffered so the uploader goroutine can finish even if we have
// already returned due to ctx cancellation.
332 errChan := make(chan error, 1)
334 var body io.Reader = bufr
336 // We must send a "Content-Length: 0" header,
337 // but the http client interprets
338 // ContentLength==0 as "unknown" unless it can
339 // confirm by introspection that Body will
// Upload runs in a goroutine (launch elided); result arrives on errChan.
344 errChan <- v.container.CreateBlockBlobFromReader(hash, len(data), body, nil)
// ctx-cancelled path: stop feeding the uploader and abandon it.
348 ctxlog.FromContext(ctx).Debugf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
349 // bufw.CloseWithError() interrupts bufw.Write() if
350 // necessary, ensuring CreateBlockBlobFromReader can't
351 // read any more of our data slice via bufr after we
353 bufw.CloseWithError(ctx.Err())
354 ctxlog.FromContext(ctx).Debugf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
// Normal path: upload finished (successfully or not).
356 case err := <-errChan:
361 // BlockTouch updates the last-modified property of a block blob.
// It refuses to touch a trashed block (reported as nonexistent), and
// bumps the mtime by rewriting metadata with a fresh "touch" value.
// NOTE(review): error-check lines after checkTrashed are elided from
// this excerpt.
362 func (v *AzureBlobVolume) BlockTouch(hash string) error {
363 trashed, metadata, err := v.checkTrashed(hash)
368 return os.ErrNotExist
// Setting any metadata updates the blob's Last-Modified property.
371 metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
372 return v.container.SetBlobMetadata(hash, metadata, nil)
375 // Mtime returns the last-modified property of a block blob.
// Trashed blocks are reported as nonexistent.
// NOTE(review): error-check/brace lines are elided from this excerpt.
376 func (v *AzureBlobVolume) Mtime(hash string) (time.Time, error) {
377 trashed, _, err := v.checkTrashed(hash)
379 return time.Time{}, err
382 return time.Time{}, os.ErrNotExist
385 props, err := v.container.GetBlobProperties(hash)
387 return time.Time{}, err
// props.LastModified is a storage SDK time type; convert to time.Time.
389 return time.Time(props.LastModified), nil
392 // Index writes a list of Keep blocks that are stored in the
// container, one per line ("hash+size mtime-nanoseconds"), paging
// through ListBlobs results until NextMarker is empty.
// NOTE(review): interior lines (prefix handling, error checks,
// continue statements, closing braces) are elided from this excerpt.
394 func (v *AzureBlobVolume) Index(ctx context.Context, prefix string, writer io.Writer) error {
395 params := storage.ListBlobsParameters{
// Metadata is needed to recognize trashed blobs (expires_at).
397 Include: &storage.IncludeBlobDataset{Metadata: true},
399 for page := 1; ; page++ {
404 resp, err := v.listBlobs(page, params)
408 for _, b := range resp.Blobs {
// Skip anything that is not a Keep block (32 hex digits).
409 if !v.isKeepBlock(b.Name) {
412 modtime := time.Time(b.Properties.LastModified)
413 if b.Properties.ContentLength == 0 && modtime.Add(v.WriteRaceInterval.Duration()).After(time.Now()) {
414 // A new zero-length blob is probably
415 // just a new non-empty blob that
416 // hasn't committed its data yet (see
417 // Get()), and in any case has no
421 if b.Metadata["expires_at"] != "" {
422 // Trashed blob; exclude it from response
425 fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, modtime.UnixNano())
// Empty NextMarker means this was the last page.
427 if resp.NextMarker == "" {
430 params.Marker = resp.NextMarker
434 // call v.container.ListBlobs, retrying if needed.
// Retries up to ListBlobsMaxAttempts times, sleeping
// ListBlobsRetryDelay between attempts, but only for
// errVolumeUnavailable (e.g. Azure 503 ServerBusy — see
// translateError); other results are returned immediately.
// NOTE(review): the non-retry return path and closing braces are on
// lines elided from this excerpt.
435 func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters) (resp storage.BlobListResponse, err error) {
436 for i := 0; i < v.ListBlobsMaxAttempts; i++ {
437 resp, err = v.container.ListBlobs(params)
438 err = v.translateError(err)
439 if err == errVolumeUnavailable {
440 v.logger.Printf("ListBlobs: will retry page %d in %s after error: %s", page, v.ListBlobsRetryDelay, err)
441 time.Sleep(time.Duration(v.ListBlobsRetryDelay))
450 // Trash a Keep block.
// If BlobTrashLifetime is zero the blob is deleted outright; otherwise
// it is marked with an "expires_at" metadata timestamp for EmptyTrash
// to collect later. Both operations are guarded with If-Match on the
// Etag captured before the Mtime check.
// NOTE(review): error-check lines and some literal struct fields are
// elided from this excerpt.
451 func (v *AzureBlobVolume) BlockTrash(loc string) error {
452 // Ideally we would use If-Unmodified-Since, but that
453 // particular condition seems to be ignored by Azure. Instead,
454 // we get the Etag before checking Mtime, and use If-Match to
455 // ensure we don't delete data if Put() or Touch() happens
456 // between our calls to Mtime() and DeleteBlob().
457 props, err := v.container.GetBlobProperties(loc)
// Refuse to trash blocks that are still within the signature TTL.
461 if t, err := v.Mtime(loc); err != nil {
463 } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
467 // If BlobTrashLifetime == 0, just delete it
468 if v.cluster.Collections.BlobTrashLifetime == 0 {
469 return v.container.DeleteBlob(loc, &storage.DeleteBlobOptions{
474 // Otherwise, mark as trash
475 return v.container.SetBlobMetadata(loc, storage.BlobMetadata{
476 "expires_at": fmt.Sprintf("%d", time.Now().Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Unix()),
477 }, &storage.SetBlobMetadataOptions{
482 // BlockUntrash deletes the expires_at metadata attribute for the
483 // specified block blob.
// NOTE(review): error-check/brace lines are elided from this excerpt.
484 func (v *AzureBlobVolume) BlockUntrash(hash string) error {
485 // if expires_at does not exist, return NotFoundError
486 metadata, err := v.container.GetBlobMetadata(hash)
488 return v.translateError(err)
// A block that was never trashed is "not found" from the caller's
// point of view — there is nothing to untrash.
490 if metadata["expires_at"] == "" {
491 return os.ErrNotExist
494 // reset expires_at metadata attribute
495 metadata["expires_at"] = ""
496 err = v.container.SetBlobMetadata(hash, metadata, nil)
497 return v.translateError(err)
500 // If possible, translate an Azure SDK error to a recognizable error
501 // like os.ErrNotExist.
// Matching is by error-message substring because the SDK does not
// expose typed sentinel errors for these cases.
// NOTE(review): the nil-check, switch header, and default case are on
// lines elided from this excerpt.
502 func (v *AzureBlobVolume) translateError(err error) error {
506 case strings.Contains(err.Error(), "StatusCode=503"):
507 // "storage: service returned error: StatusCode=503, ErrorCode=ServerBusy, ErrorMessage=The server is busy" (See #14804)
508 return errVolumeUnavailable
509 case strings.Contains(err.Error(), "Not Found"):
510 // "storage: service returned without a response body (404 Not Found)"
511 return os.ErrNotExist
512 case strings.Contains(err.Error(), "ErrorCode=BlobNotFound"):
513 // "storage: service returned error: StatusCode=404, ErrorCode=BlobNotFound, ErrorMessage=The specified blob does not exist.\n..."
514 return os.ErrNotExist
// keepBlockRegexp matches a Keep block locator hash: exactly 32
// lowercase hex digits (an md5 sum).
520 var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
// isKeepBlock reports whether blob name s looks like a Keep block
// hash (used by Index and EmptyTrash to skip unrelated blobs).
522 func (v *AzureBlobVolume) isKeepBlock(s string) bool {
523 return keepBlockRegexp.MatchString(s)
526 // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
527 // and deletes them from the volume.
// Work is fanned out to BlobDeleteConcurrency goroutines fed through
// the todo channel; counters are updated atomically because doBlob
// runs concurrently.
// NOTE(review): interior lines (early returns, goroutine scaffolding,
// channel close/wait, closing braces) are elided from this excerpt.
528 func (v *AzureBlobVolume) EmptyTrash() {
529 var bytesDeleted, bytesInTrash int64
530 var blocksDeleted, blocksInTrash int64
532 doBlob := func(b storage.Blob) {
533 // Check whether the block is flagged as trash
534 if b.Metadata["expires_at"] == "" {
538 atomic.AddInt64(&blocksInTrash, 1)
539 atomic.AddInt64(&bytesInTrash, b.Properties.ContentLength)
541 expiresAt, err := strconv.ParseInt(b.Metadata["expires_at"], 10, 64)
543 v.logger.Printf("EmptyTrash: ParseInt(%v): %v", b.Metadata["expires_at"], err)
// Not yet expired: leave it in the trash.
547 if expiresAt > time.Now().Unix() {
// If-Match guards against deleting a blob that was untrashed/rewritten
// since we listed it.
551 err = v.container.DeleteBlob(b.Name, &storage.DeleteBlobOptions{
552 IfMatch: b.Properties.Etag,
555 v.logger.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
558 atomic.AddInt64(&blocksDeleted, 1)
559 atomic.AddInt64(&bytesDeleted, b.Properties.ContentLength)
// Worker pool: BlobDeleteConcurrency goroutines consume todo.
562 var wg sync.WaitGroup
563 todo := make(chan storage.Blob, v.cluster.Collections.BlobDeleteConcurrency)
564 for i := 0; i < v.cluster.Collections.BlobDeleteConcurrency; i++ {
568 for b := range todo {
// Producer: page through all blobs (with metadata) and enqueue them.
574 params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
575 for page := 1; ; page++ {
576 resp, err := v.listBlobs(page, params)
578 v.logger.Printf("EmptyTrash: ListBlobs: %v", err)
581 for _, b := range resp.Blobs {
584 if resp.NextMarker == "" {
587 params.Marker = resp.NextMarker
592 v.logger.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
595 // InternalStats returns bucket I/O and API call counters.
// The pointer exposes the live stats struct, allowing callers
// (e.g. a status endpoint) to serialize current values.
596 func (v *AzureBlobVolume) InternalStats() interface{} {
597 return &v.container.stats
// azureBlobStats counts Azure API calls made through azureContainer,
// one counter per operation type.
// NOTE(review): several counter fields (and the embedded statsTicker
// used by TickErr below) are on lines elided from this excerpt.
600 type azureBlobStats struct {
605 GetMetadataOps uint64
606 GetPropertiesOps uint64
608 SetMetadataOps uint64
// TickErr counts an error, categorized by its Go type — refined with
// the HTTP status and Azure error code when the error is an
// AzureStorageServiceError.
// NOTE(review): the nil-error early return is on lines elided from
// this excerpt.
613 func (s *azureBlobStats) TickErr(err error) {
617 errType := fmt.Sprintf("%T", err)
618 if err, ok := err.(storage.AzureStorageServiceError); ok {
619 errType = errType + fmt.Sprintf(" %d (%s)", err.StatusCode, err.Code)
621 s.statsTicker.TickErr(err, errType)
624 // azureContainer wraps storage.Container in order to count I/O and
// API usage stats: every method below ticks the relevant counters
// before delegating to the underlying SDK container.
// NOTE(review): the stats field declaration is on a line elided from
// this excerpt.
626 type azureContainer struct {
627 ctr *storage.Container
// Exists reports whether the container exists, counting the call as
// an "exists" op.
// NOTE(review): the trailing error-tick/return lines are elided from
// this excerpt.
631 func (c *azureContainer) Exists() (bool, error) {
632 c.stats.TickOps("exists")
633 c.stats.Tick(&c.stats.Ops)
634 ok, err := c.ctr.Exists()
// GetBlobMetadata fetches metadata for blob bname, counting the call
// as a "get_metadata" op.
639 func (c *azureContainer) GetBlobMetadata(bname string) (storage.BlobMetadata, error) {
640 c.stats.TickOps("get_metadata")
641 c.stats.Tick(&c.stats.Ops, &c.stats.GetMetadataOps)
642 b := c.ctr.GetBlobReference(bname)
// GetMetadata populates b.Metadata in place; return it with err.
643 err := b.GetMetadata(nil)
645 return b.Metadata, err
// GetBlobProperties fetches properties (size, mtime, etag, ...) for
// blob bname, counting the call as a "get_properties" op.
648 func (c *azureContainer) GetBlobProperties(bname string) (*storage.BlobProperties, error) {
649 c.stats.TickOps("get_properties")
650 c.stats.Tick(&c.stats.Ops, &c.stats.GetPropertiesOps)
651 b := c.ctr.GetBlobReference(bname)
// GetProperties populates b.Properties in place.
652 err := b.GetProperties(nil)
654 return &b.Properties, err
// GetBlob opens a reader for the full content of blob bname, counting
// the call as a "get" op and counting bytes read via countingReader.
657 func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
658 c.stats.TickOps("get")
659 c.stats.Tick(&c.stats.Ops, &c.stats.GetOps)
660 b := c.ctr.GetBlobReference(bname)
661 rdr, err := b.Get(nil)
// Wrap so every byte read is added to the in-bytes counter.
663 return newCountingReader(rdr, c.stats.TickInBytes), err
// GetBlobRange opens a reader for bytes [start, end] (end inclusive)
// of blob bname, counting the call as a "get_range" op.
// NOTE(review): the End field of storage.BlobRange is on a line
// elided from this excerpt.
666 func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
667 c.stats.TickOps("get_range")
668 c.stats.Tick(&c.stats.Ops, &c.stats.GetRangeOps)
669 b := c.ctr.GetBlobReference(bname)
670 rdr, err := b.GetRange(&storage.GetBlobRangeOptions{
671 Range: &storage.BlobRange{
672 Start: uint64(start),
675 GetBlobOptions: opts,
// Wrap so every byte read is added to the in-bytes counter.
678 return newCountingReader(rdr, c.stats.TickInBytes), err
681 // If we give it an io.Reader that doesn't also have a Len() int
682 // method, the Azure SDK determines data size by copying the data into
683 // a new buffer, which is not a good use of memory.
// readerWithAzureLen pairs a reader with a known length so the SDK
// can send Content-Length without buffering.
// NOTE(review): the struct's fields (embedded Reader and a length
// field read by Len) are on lines elided from this excerpt.
684 type readerWithAzureLen struct {
689 // Len satisfies the private lener interface in azure-sdk-for-go.
690 func (r *readerWithAzureLen) Len() int {
// CreateBlockBlobFromReader uploads size bytes from rdr as block blob
// bname, counting the call as a "create" op and counting bytes written
// via countingReader.
// NOTE(review): interior lines (the size>0 guard and the length field
// assignment in the composite literal) are elided from this excerpt.
694 func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr io.Reader, opts *storage.PutBlobOptions) error {
695 c.stats.TickOps("create")
696 c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
// Wrap with Len() so the SDK sends Content-Length without buffering
// (see readerWithAzureLen above).
698 rdr = &readerWithAzureLen{
699 Reader: newCountingReader(rdr, c.stats.TickOutBytes),
703 b := c.ctr.GetBlobReference(bname)
704 err := b.CreateBlockBlobFromReader(rdr, opts)
// SetBlobMetadata replaces blob bname's metadata with m, counting the
// call as a "set_metadata" op.
// NOTE(review): the line assigning m to the blob reference's Metadata
// field, and the trailing error-tick/return, are elided from this
// excerpt.
709 func (c *azureContainer) SetBlobMetadata(bname string, m storage.BlobMetadata, opts *storage.SetBlobMetadataOptions) error {
710 c.stats.TickOps("set_metadata")
711 c.stats.Tick(&c.stats.Ops, &c.stats.SetMetadataOps)
712 b := c.ctr.GetBlobReference(bname)
714 err := b.SetMetadata(opts)
// ListBlobs lists blobs in the container per params, counting the
// call as a "list" op.
// NOTE(review): the trailing error-tick/return lines are elided from
// this excerpt.
719 func (c *azureContainer) ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
720 c.stats.TickOps("list")
721 c.stats.Tick(&c.stats.Ops, &c.stats.ListOps)
722 resp, err := c.ctr.ListBlobs(params)
727 func (c *azureContainer) DeleteBlob(bname string, opts *storage.DeleteBlobOptions) error {
728 c.stats.TickOps("delete")
729 c.stats.Tick(&c.stats.Ops, &c.stats.DelOps)
730 b := c.ctr.GetBlobReference(bname)
731 err := b.Delete(opts)