ReadOnly bool
RequestTimeout arvados.Duration
- azClient storage.Client
- bsClient *azureBlobClient
+ azClient storage.Client
+ container *azureContainer
}
// Examples implements VolumeWithExamples.
Timeout: time.Duration(v.RequestTimeout),
}
bs := v.azClient.GetBlobService()
- v.bsClient = &azureBlobClient{
- client: &bs,
+ v.container = &azureContainer{
+ ctr: bs.GetContainerReference(v.ContainerName),
}
- ok, err := v.bsClient.ContainerExists(v.ContainerName)
- if err != nil {
+ if ok, err := v.container.Exists(); err != nil {
return err
- }
- if !ok {
+ } else if !ok {
return fmt.Errorf("Azure container %q does not exist", v.ContainerName)
}
return nil
// Return true if expires_at metadata attribute is found on the block
func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
- metadata, err := v.bsClient.GetBlobMetadata(v.ContainerName, loc)
+ metadata, err := v.container.GetBlobMetadata(loc)
if err != nil {
return false, metadata, v.translateError(err)
}
if azureMaxGetBytes < BlockSize {
// Unfortunately the handler doesn't tell us how long the blob
// is expected to be, so we have to ask Azure.
- props, err := v.bsClient.GetBlobProperties(v.ContainerName, loc)
+ props, err := v.container.GetBlobProperties(loc)
if err != nil {
return 0, v.translateError(err)
}
go func() {
defer close(gotRdr)
if startPos == 0 && endPos == expectSize {
- rdr, err = v.bsClient.GetBlob(v.ContainerName, loc)
+ rdr, err = v.container.GetBlob(loc)
} else {
- rdr, err = v.bsClient.GetBlobRange(v.ContainerName, loc, fmt.Sprintf("%d-%d", startPos, endPos-1), nil)
+ rdr, err = v.container.GetBlobRange(loc, startPos, endPos-1, nil)
}
}()
select {
gotRdr := make(chan struct{})
go func() {
defer close(gotRdr)
- rdr, err = v.bsClient.GetBlob(v.ContainerName, loc)
+ rdr, err = v.container.GetBlob(loc)
}()
select {
case <-ctx.Done():
body = http.NoBody
bufr.Close()
}
- errChan <- v.bsClient.CreateBlockBlobFromReader(v.ContainerName, loc, uint64(len(block)), body, nil)
+ errChan <- v.container.CreateBlockBlobFromReader(loc, len(block), body, nil)
}()
select {
case <-ctx.Done():
}
metadata["touch"] = fmt.Sprintf("%d", time.Now())
- return v.bsClient.SetBlobMetadata(v.ContainerName, loc, metadata, nil)
+ return v.container.SetBlobMetadata(loc, metadata, nil)
}
// Mtime returns the last-modified property of a block blob.
return time.Time{}, os.ErrNotExist
}
- props, err := v.bsClient.GetBlobProperties(v.ContainerName, loc)
+ props, err := v.container.GetBlobProperties(loc)
if err != nil {
return time.Time{}, err
}
- return time.Parse(time.RFC1123, props.LastModified)
+ return time.Time(props.LastModified), nil
}
// IndexTo writes a list of Keep blocks that are stored in the
func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
params := storage.ListBlobsParameters{
Prefix: prefix,
- Include: "metadata",
+ Include: &storage.IncludeBlobDataset{Metadata: true},
}
for {
- resp, err := v.bsClient.ListBlobs(v.ContainerName, params)
+ resp, err := v.container.ListBlobs(params)
if err != nil {
return err
}
for _, b := range resp.Blobs {
- t, err := time.Parse(time.RFC1123, b.Properties.LastModified)
- if err != nil {
- return err
- }
if !v.isKeepBlock(b.Name) {
continue
}
- if b.Properties.ContentLength == 0 && t.Add(azureWriteRaceInterval).After(time.Now()) {
+ modtime := time.Time(b.Properties.LastModified)
+ if b.Properties.ContentLength == 0 && modtime.Add(azureWriteRaceInterval).After(time.Now()) {
// A new zero-length blob is probably
// just a new non-empty blob that
// hasn't committed its data yet (see
// Trashed blob; exclude it from response
continue
}
- fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, t.UnixNano())
+ fmt.Fprintf(writer, "%s+%d %d\n", b.Name, b.Properties.ContentLength, modtime.UnixNano())
}
if resp.NextMarker == "" {
return nil
// we get the Etag before checking Mtime, and use If-Match to
// ensure we don't delete data if Put() or Touch() happens
// between our calls to Mtime() and DeleteBlob().
- props, err := v.bsClient.GetBlobProperties(v.ContainerName, loc)
+ props, err := v.container.GetBlobProperties(loc)
if err != nil {
return err
}
// If TrashLifetime == 0, just delete it
if theConfig.TrashLifetime == 0 {
- return v.bsClient.DeleteBlob(v.ContainerName, loc, map[string]string{
- "If-Match": props.Etag,
+ return v.container.DeleteBlob(loc, &storage.DeleteBlobOptions{
+ IfMatch: props.Etag,
})
}
// Otherwise, mark as trash
- return v.bsClient.SetBlobMetadata(v.ContainerName, loc, map[string]string{
+ return v.container.SetBlobMetadata(loc, storage.BlobMetadata{
"expires_at": fmt.Sprintf("%d", time.Now().Add(theConfig.TrashLifetime.Duration()).Unix()),
- }, map[string]string{
- "If-Match": props.Etag,
+ }, &storage.SetBlobMetadataOptions{
+ IfMatch: props.Etag,
})
}
// Delete the expires_at metadata attribute
func (v *AzureBlobVolume) Untrash(loc string) error {
// if expires_at does not exist, return NotFoundError
- metadata, err := v.bsClient.GetBlobMetadata(v.ContainerName, loc)
+ metadata, err := v.container.GetBlobMetadata(loc)
if err != nil {
return v.translateError(err)
}
// reset expires_at metadata attribute
metadata["expires_at"] = ""
- err = v.bsClient.SetBlobMetadata(v.ContainerName, loc, metadata, nil)
+ err = v.container.SetBlobMetadata(loc, metadata, nil)
return v.translateError(err)
}
func (v *AzureBlobVolume) EmptyTrash() {
var bytesDeleted, bytesInTrash int64
var blocksDeleted, blocksInTrash int
- params := storage.ListBlobsParameters{Include: "metadata"}
+ params := storage.ListBlobsParameters{Include: &storage.IncludeBlobDataset{Metadata: true}}
for {
- resp, err := v.bsClient.ListBlobs(v.ContainerName, params)
+ resp, err := v.container.ListBlobs(params)
if err != nil {
log.Printf("EmptyTrash: ListBlobs: %v", err)
break
continue
}
- err = v.bsClient.DeleteBlob(v.ContainerName, b.Name, map[string]string{
- "If-Match": b.Properties.Etag,
+ err = v.container.DeleteBlob(b.Name, &storage.DeleteBlobOptions{
+ IfMatch: b.Properties.Etag,
})
if err != nil {
log.Printf("EmptyTrash: DeleteBlob(%v): %v", b.Name, err)
// InternalStats returns bucket I/O and API call counters.
func (v *AzureBlobVolume) InternalStats() interface{} {
- return &v.bsClient.stats
+ return &v.container.stats
}
type azureBlobStats struct {
s.statsTicker.TickErr(err, errType)
}
-// azureBlobClient wraps storage.BlobStorageClient in order to count
-// I/O and API usage stats.
-type azureBlobClient struct {
- client *storage.BlobStorageClient
- stats azureBlobStats
+// azureContainer wraps storage.Container in order to count I/O and
+// API usage stats.
+type azureContainer struct {
+ ctr *storage.Container // SDK container reference; all blob operations go through this
+ stats azureBlobStats // I/O and API call counters, exposed via InternalStats()
}
-func (c *azureBlobClient) ContainerExists(cname string) (bool, error) {
+// Exists reports whether the wrapped container exists, counting the
+// API call in the Ops stat and recording any error.
+func (c *azureContainer) Exists() (bool, error) {
c.stats.Tick(&c.stats.Ops)
- ok, err := c.client.ContainerExists(cname)
+ ok, err := c.ctr.Exists()
c.stats.TickErr(err)
return ok, err
}
-func (c *azureBlobClient) GetBlobMetadata(cname, bname string) (map[string]string, error) {
+// GetBlobMetadata fetches the named blob's metadata. The blob
+// reference's Metadata field is returned even when err is non-nil,
+// so callers can hand both back together (see checkTrashed).
+func (c *azureContainer) GetBlobMetadata(bname string) (storage.BlobMetadata, error) {
c.stats.Tick(&c.stats.Ops, &c.stats.GetMetadataOps)
- m, err := c.client.GetBlobMetadata(cname, bname)
+ b := c.ctr.GetBlobReference(bname)
+ err := b.GetMetadata(nil)
c.stats.TickErr(err)
- return m, err
+ return b.Metadata, err
}
-func (c *azureBlobClient) GetBlobProperties(cname, bname string) (*storage.BlobProperties, error) {
+// GetBlobProperties fetches the named blob's properties (callers use
+// LastModified, ContentLength, and Etag), counting the API call in
+// GetPropertiesOps.
+func (c *azureContainer) GetBlobProperties(bname string) (*storage.BlobProperties, error) {
c.stats.Tick(&c.stats.Ops, &c.stats.GetPropertiesOps)
- p, err := c.client.GetBlobProperties(cname, bname)
+ b := c.ctr.GetBlobReference(bname)
+ err := b.GetProperties(nil)
c.stats.TickErr(err)
- return p, err
+ return &b.Properties, err
}
-func (c *azureBlobClient) GetBlob(cname, bname string) (io.ReadCloser, error) {
+// GetBlob opens a reader on the entire blob, wrapped in a
+// CountingReader so bytes read are tallied via TickInBytes.
+// NOTE(review): the wrapper is returned even when err is non-nil, in
+// which case the underlying reader may be nil — callers must check
+// err before reading.
+func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
c.stats.Tick(&c.stats.Ops, &c.stats.GetOps)
- rdr, err := c.client.GetBlob(cname, bname)
+ b := c.ctr.GetBlobReference(bname)
+ rdr, err := b.Get(nil)
c.stats.TickErr(err)
return NewCountingReader(rdr, c.stats.TickInBytes), err
}
-func (c *azureBlobClient) GetBlobRange(cname, bname, byterange string, hdrs map[string]string) (io.ReadCloser, error) {
+// GetBlobRange opens a reader on bytes [start, end] of the named
+// blob; end is inclusive (the caller passes endPos-1). opts carries
+// any extra request options. Bytes read are tallied via TickInBytes.
+func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
c.stats.Tick(&c.stats.Ops, &c.stats.GetRangeOps)
- rdr, err := c.client.GetBlobRange(cname, bname, byterange, hdrs)
+ b := c.ctr.GetBlobReference(bname)
+ rdr, err := b.GetRange(&storage.GetBlobRangeOptions{
+ Range: &storage.BlobRange{
+ Start: uint64(start),
+ End: uint64(end),
+ },
+ GetBlobOptions: opts,
+ })
c.stats.TickErr(err)
return NewCountingReader(rdr, c.stats.TickInBytes), err
}
-func (c *azureBlobClient) CreateBlockBlobFromReader(cname, bname string, size uint64, rdr io.Reader, hdrs map[string]string) error {
+// If we give it an io.Reader that doesn't also have a Len() int
+// method, the Azure SDK determines data size by copying the data into
+// a new buffer, which is not a good use of memory.
+type readerWithAzureLen struct {
+ io.Reader
+ len int
+}
+
+// Len satisfies the private lener interface in azure-sdk-for-go.
+func (r *readerWithAzureLen) Len() int {
+ return r.len
+}
+
+// CreateBlockBlobFromReader uploads size bytes from rdr as the named
+// block blob. When size != 0 the reader is wrapped so outgoing bytes
+// are counted (TickOutBytes) and a Len() method is exposed to the
+// SDK, avoiding the buffer-copy size probe described above.
+func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr io.Reader, opts *storage.PutBlobOptions) error {
c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
if size != 0 {
- rdr = NewCountingReader(rdr, c.stats.TickOutBytes)
+ rdr = &readerWithAzureLen{
+ Reader: NewCountingReader(rdr, c.stats.TickOutBytes),
+ len: size,
+ }
}
- err := c.client.CreateBlockBlobFromReader(cname, bname, size, rdr, hdrs)
+ b := c.ctr.GetBlobReference(bname)
+ err := b.CreateBlockBlobFromReader(rdr, opts)
c.stats.TickErr(err)
return err
}
-func (c *azureBlobClient) SetBlobMetadata(cname, bname string, m, hdrs map[string]string) error {
+// SetBlobMetadata replaces the named blob's metadata with m. opts can
+// carry preconditions such as IfMatch (used by Trash to avoid racing
+// with a concurrent Put or Touch).
+func (c *azureContainer) SetBlobMetadata(bname string, m storage.BlobMetadata, opts *storage.SetBlobMetadataOptions) error {
c.stats.Tick(&c.stats.Ops, &c.stats.SetMetadataOps)
- err := c.client.SetBlobMetadata(cname, bname, m, hdrs)
+ b := c.ctr.GetBlobReference(bname)
+ b.Metadata = m
+ err := b.SetMetadata(opts)
c.stats.TickErr(err)
return err
}
-func (c *azureBlobClient) ListBlobs(cname string, params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
+// ListBlobs lists blobs in the container according to params
+// (prefix, included datasets, paging marker), counting the API call
+// in ListOps.
+func (c *azureContainer) ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
c.stats.Tick(&c.stats.Ops, &c.stats.ListOps)
- resp, err := c.client.ListBlobs(cname, params)
+ resp, err := c.ctr.ListBlobs(params)
c.stats.TickErr(err)
return resp, err
}
-func (c *azureBlobClient) DeleteBlob(cname, bname string, hdrs map[string]string) error {
+// DeleteBlob deletes the named blob. opts can carry preconditions
+// such as IfMatch (used by Trash and EmptyTrash so a blob rewritten
+// after its properties were inspected is not deleted).
+func (c *azureContainer) DeleteBlob(bname string, opts *storage.DeleteBlobOptions) error {
c.stats.Tick(&c.stats.Ops, &c.stats.DelOps)
- err := c.client.DeleteBlob(cname, bname, hdrs)
+ b := c.ctr.GetBlobReference(bname)
+ err := b.Delete(opts)
c.stats.TickErr(err)
return err
}
"revision": "21e563311c2dc5ac53464a2c31cb91fb833c6cb9",
"revisionTime": "2017-07-27T13:52:37Z"
},
+ {
+ "checksumSHA1": "PfyfOXsPbGEWmdh54cguqzdwloY=",
+ "path": "github.com/Azure/azure-sdk-for-go/version",
+ "revision": "471256ff7c6c93b96131845cef5309d20edd313d",
+ "revisionTime": "2018-02-14T01:17:07Z"
+ },
+ {
+ "checksumSHA1": "LQWU/2M2E4L/hVzT9BVW1SkLrpA=",
+ "path": "github.com/Azure/go-autorest/autorest",
+ "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
+ "revisionTime": "2017-11-30T17:00:06Z"
+ },
+ {
+ "checksumSHA1": "nBQ7cdhoeYUur6G6HG97uueoDmE=",
+ "path": "github.com/Azure/go-autorest/autorest/adal",
+ "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
+ "revisionTime": "2017-11-30T17:00:06Z"
+ },
+ {
+ "checksumSHA1": "zXyLmDVpkYkIsL0yinNLoW82IZc=",
+ "path": "github.com/Azure/go-autorest/autorest/azure",
+ "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
+ "revisionTime": "2017-11-30T17:00:06Z"
+ },
+ {
+ "checksumSHA1": "9nXCi9qQsYjxCeajJKWttxgEt0I=",
+ "path": "github.com/Azure/go-autorest/autorest/date",
+ "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
+ "revisionTime": "2017-11-30T17:00:06Z"
+ },
{
"checksumSHA1": "o/3cn04KAiwC7NqNVvmfVTD+hgA=",
"path": "github.com/Microsoft/go-winio",
"revisionTime": "2018-01-08T08:51:32Z"
},
{
- "checksumSHA1": "pAu+do4x7E5SFLfIqJeGwhcOd6E=",
+ "checksumSHA1": "cvqRUhkzb7TVPfqOBH2shPRfLSY=",
"path": "github.com/curoverse/azure-sdk-for-go/storage",
- "revision": "1620af6b32398bfc91827ceae54a8cc1f55df04d",
+ "revision": "d07ad83489a4b8049ef4bcafcc622c4b16cc6b9c",
"revisionTime": "2016-12-14T20:08:43Z"
},
+ {
+ "checksumSHA1": "+TKtBzv23ywvmmqRiGEjUba4YmI=",
+ "path": "github.com/dgrijalva/jwt-go",
+ "revision": "dbeaa9332f19a944acb5736b4456cfcc02140e29",
+ "revisionTime": "2017-10-19T21:57:19Z"
+ },
{
"checksumSHA1": "Gj+xR1VgFKKmFXYOJMnAczC3Znk=",
"path": "github.com/docker/distribution/digestset",
"revision": "83612a56d3dd153a94a629cd64925371c9adad78",
"revisionTime": "2017-11-26T05:04:59Z"
},
+ {
+ "checksumSHA1": "T9E+5mKBQ/BX4wlNxgaPfetxdeI=",
+ "path": "github.com/marstr/guid",
+ "revision": "8bdf7d1a087ccc975cf37dd6507da50698fd19ca",
+ "revisionTime": "2017-04-27T23:51:15Z"
+ },
{
"checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=",
"path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
"revision": "cb4147076ac75738c9a7d279075a253c0cc5acbd",
"revisionTime": "2018-01-25T13:30:57Z"
},
+ {
+ "checksumSHA1": "eDQ6f1EsNf+frcRO/9XukSEchm8=",
+ "path": "github.com/satori/go.uuid",
+ "revision": "36e9d2ebbde5e3f13ab2e25625fd453271d6522e",
+ "revisionTime": "2018-01-03T17:44:51Z"
+ },
{
"checksumSHA1": "UwtyqB7CaUWPlw0DVJQvw0IFQZs=",
"path": "github.com/sergi/go-diff/diffmatchpatch",