+ return buf, err
+}
+
+ // get fetches the contents of blob loc from Azure storage into a
+ // buffer obtained from the bufs pool. When a single GET request cannot
+ // cover a full block (azureMaxGetBytes < BlockSize), the blob is
+ // downloaded as several ranged GETs in parallel and reassembled in
+ // place in buf.
+ func (v *AzureBlobVolume) get(loc string) ([]byte, error) {
+ expectSize := BlockSize
+ if azureMaxGetBytes < BlockSize {
+ // Unfortunately the handler doesn't tell us how long the blob
+ // is expected to be, so we have to ask Azure.
+ props, err := v.bsClient.GetBlobProperties(v.containerName, loc)
+ if err != nil {
+ return nil, v.translateError(err)
+ }
+ // Reject any size that could not be a valid Keep block.
+ if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
+ return nil, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize)
+ }
+ expectSize = int(props.ContentLength)
+ }
+
+ // Buffer comes from the shared bufs pool; presumably the caller is
+ // responsible for returning it to the pool — confirm against callers.
+ buf := bufs.Get(expectSize)
+ if expectSize == 0 {
+ return buf, nil
+ }
+
+ // We'll update this actualSize if/when we get the last piece.
+ actualSize := -1
+ // Ceiling division: number of ranged GETs needed to cover expectSize.
+ pieces := (expectSize + azureMaxGetBytes - 1) / azureMaxGetBytes
+ // NOTE(review): this local shadows the stdlib "errors" package name
+ // inside the function; harmless as-is, but renaming (e.g. getErrs)
+ // would be clearer.
+ errors := make([]error, pieces)
+ var wg sync.WaitGroup
+ wg.Add(pieces)
+ // One goroutine per piece. Each goroutine writes only its own disjoint
+ // region of buf and its own errors[p] slot, so no extra locking is
+ // needed for the buffer or the error slice.
+ for p := 0; p < pieces; p++ {
+ go func(p int) {
+ defer wg.Done()
+ startPos := p * azureMaxGetBytes
+ endPos := startPos + azureMaxGetBytes
+ if endPos > expectSize {
+ endPos = expectSize
+ }
+ var rdr io.ReadCloser
+ var err error
+ if startPos == 0 && endPos == expectSize {
+ // Single piece covering the whole blob: a plain GET
+ // avoids sending a Range header at all.
+ rdr, err = v.bsClient.GetBlob(v.containerName, loc)
+ } else {
+ // HTTP byte ranges are inclusive at both ends, hence endPos-1.
+ rdr, err = v.bsClient.GetBlobRange(v.containerName, loc, fmt.Sprintf("%d-%d", startPos, endPos-1))
+ }
+ if err != nil {
+ errors[p] = err
+ return
+ }
+ defer rdr.Close()
+ n, err := io.ReadFull(rdr, buf[startPos:endPos])
+ if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
+ // If we don't know the actual size,
+ // and just tried reading 64 MiB, it's
+ // normal to encounter EOF.
+ } else if err != nil {
+ errors[p] = err
+ }
+ if p == pieces-1 {
+ // The last piece determines the blob's true length.
+ // Presumably actualSize is read only after wg.Wait() in the
+ // (unseen) remainder of this function — confirm there.
+ actualSize = startPos + n
+ }
+ }(p)