"io"
"io/ioutil"
"log"
+ "net/http"
"os"
"regexp"
"strconv"
"sync"
"time"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/curoverse/azure-sdk-for-go/storage"
)
+const azureDefaultRequestTimeout = arvados.Duration(10 * time.Minute)
+
var (
azureMaxGetBytes int
azureStorageAccountName string
ContainerName string
AzureReplication int
ReadOnly bool
+ RequestTimeout arvados.Duration
azClient storage.Client
bsClient storage.BlobStorageClient
StorageAccountKeyFile: "/etc/azure_storage_account_key.txt",
ContainerName: "example-container-name",
AzureReplication: 3,
+ RequestTimeout: azureDefaultRequestTimeout,
},
}
}
if err != nil {
return fmt.Errorf("creating Azure storage client: %s", err)
}
+
+ if v.RequestTimeout == 0 {
+ v.RequestTimeout = azureDefaultRequestTimeout
+ }
+ v.azClient.HTTPClient = &http.Client{
+ Timeout: time.Duration(v.RequestTimeout),
+ }
v.bsClient = v.azClient.GetBlobService()
ok, err := v.bsClient.ContainerExists(v.ContainerName)
"github.com/AdRoll/goamz/s3"
)
+const (
+ s3DefaultReadTimeout = arvados.Duration(10 * time.Minute)
+ s3DefaultConnectTimeout = arvados.Duration(time.Minute)
+)
+
var (
// ErrS3TrashDisabled is returned by Trash if that operation
// is impossible with the current config.
LocationConstraint bool
IndexPageSize int
S3Replication int
+ ConnectTimeout arvados.Duration
+ ReadTimeout arvados.Duration
RaceWindow arvados.Duration
ReadOnly bool
UnsafeDelete bool
// Examples returns sample configurations for an S3-compatible volume:
// one for AWS S3 (regional endpoint) and one for Google Cloud
// Storage's S3-interoperability endpoint. Both examples include
// explicit ConnectTimeout/ReadTimeout values so generated config
// templates show the timeout knobs.
func (*S3Volume) Examples() []Volume {
	return []Volume{
		&S3Volume{
			AccessKeyFile:  "/etc/aws_s3_access_key.txt",
			SecretKeyFile:  "/etc/aws_s3_secret_key.txt",
			Endpoint:       "",
			Region:         "us-east-1",
			Bucket:         "example-bucket-name",
			IndexPageSize:  1000,
			S3Replication:  2,
			RaceWindow:     arvados.Duration(24 * time.Hour),
			ConnectTimeout: arvados.Duration(time.Minute),
			ReadTimeout:    arvados.Duration(5 * time.Minute),
		},
		&S3Volume{
			AccessKeyFile:  "/etc/gce_s3_access_key.txt",
			SecretKeyFile:  "/etc/gce_s3_secret_key.txt",
			Endpoint:       "https://storage.googleapis.com",
			Region:         "",
			Bucket:         "example-bucket-name",
			IndexPageSize:  1000,
			S3Replication:  2,
			RaceWindow:     arvados.Duration(24 * time.Hour),
			ConnectTimeout: arvados.Duration(time.Minute),
			ReadTimeout:    arvados.Duration(5 * time.Minute),
		},
	}
}
if err != nil {
return err
}
+
+ // Zero timeouts mean "wait forever", which is a bad
+ // default. Default to long timeouts instead.
+ if v.ConnectTimeout == 0 {
+ v.ConnectTimeout = s3DefaultConnectTimeout
+ }
+ if v.ReadTimeout == 0 {
+ v.ReadTimeout = s3DefaultReadTimeout
+ }
+
+ client := s3.New(auth, region)
+ client.ConnectTimeout = time.Duration(v.ConnectTimeout)
+ client.ReadTimeout = time.Duration(v.ReadTimeout)
v.bucket = &s3.Bucket{
- S3: s3.New(auth, region),
+ S3: client,
Name: v.Bucket,
}
return nil