# Deploy with Capistrano
# gem 'capistrano'
-# To use debugger
-#gem 'byebug'
-
gem 'passenger', :group => :production
gem 'andand'
gem 'RedCloth'
i18n (>= 0.7, < 2)
minitest (~> 5.1)
tzinfo (~> 1.1)
- addressable (2.6.0)
- public_suffix (>= 2.0.2, < 4.0)
+ addressable (2.7.0)
+ public_suffix (>= 2.0.2, < 5.0)
andand (1.3.3)
angularjs-rails (1.3.15)
arel (7.1.4)
- arvados (1.3.1.20190320201707)
+ arvados (1.3.3.20190320201707)
activesupport (>= 3)
andand (~> 1.3, >= 1.3.3)
arvados-google-api-client (>= 0.7, < 0.8.9)
flamegraph (0.9.5)
globalid (0.4.2)
activesupport (>= 4.2.0)
- googleauth (0.8.1)
+ googleauth (0.9.0)
faraday (~> 0.12)
jwt (>= 1.4, < 3.0)
memoist (~> 0.16)
actionpack (>= 4)
less (~> 2.6.0)
sprockets (>= 2)
- libv8 (3.16.14.19)
+ libv8 (3.16.14.19-x86_64-linux)
lograge (0.10.0)
actionpack (>= 4)
activesupport (>= 4)
morrisjs-rails (0.5.1.2)
railties (> 3.1, < 6)
multi_json (1.13.1)
- multipart-post (2.0.0)
+ multipart-post (2.1.1)
net-scp (2.0.0)
net-ssh (>= 2.6.5, < 6.0.0)
net-sftp (2.1.2)
net-ssh-gateway (2.0.0)
net-ssh (>= 4.0.0)
nio4r (2.3.1)
- nokogiri (1.10.2)
+ nokogiri (1.10.4)
mini_portile2 (~> 2.4.0)
npm-rails (0.2.1)
rails (>= 3.2)
- oj (3.7.11)
- os (1.0.0)
+ oj (3.7.12)
+ os (1.0.1)
passenger (6.0.2)
rack
rake (>= 0.8.1)
cliver (~> 0.3.1)
multi_json (~> 1.0)
websocket-driver (>= 0.2.0)
- public_suffix (3.0.3)
+ public_suffix (4.0.1)
rack (2.0.7)
rack-mini-profiler (1.0.2)
rack (>= 1.2.0)
retriable (1.4.1)
ruby-debug-passenger (0.2.0)
ruby-prof (0.17.0)
- rubyzip (1.2.2)
+ rubyzip (1.3.0)
rvm-capistrano (1.5.6)
capistrano (~> 2.15.4)
safe_yaml (1.0.5)
return 1
fi
- go get -ldflags "-X main.version=${go_package_version}" "git.curoverse.com/arvados.git/$src_path"
+ go get -ldflags "-X git.curoverse.com/arvados.git/lib/cmd.version=${go_package_version} -X main.version=${go_package_version}" "git.curoverse.com/arvados.git/$src_path"
local -a switches=()
systemd_unit="$WORKSPACE/${src_path}/${prog}.service"
retry do_test_once ${@}
}
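+# Assemble the -ldflags value that embeds the package version
+# (ARVADOS_VERSION, or the current git commit with a -dev suffix)
+# into the Go binaries built by "go get" / "go test" below.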
+go_ldflags() {
+ version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}
+ echo "-X git.curoverse.com/arvados.git/lib/cmd.version=${version} -X main.version=${version}"
+}
+
do_test_once() {
unset result
# before trying "go test". Otherwise, coverage-reporting
# mode makes Go show the wrong line numbers when reporting
# compilation errors.
- go get -ldflags "-X git.curoverse.com/arvados.git/lib/cmd.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1" && \
+ go get -ldflags "$(go_ldflags)" -t "git.curoverse.com/arvados.git/$1" && \
cd "$GOPATH/src/git.curoverse.com/arvados.git/$1" && \
if [[ -n "${testargs[$1]}" ]]
then
result=1
elif [[ "$2" == "go" ]]
then
- go get -ldflags "-X git.curoverse.com/arvados.git/lib/cmd.version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}" -t "git.curoverse.com/arvados.git/$1"
+ go get -ldflags "$(go_ldflags)" -t "git.curoverse.com/arvados.git/$1"
elif [[ "$2" == "pip" ]]
then
# $3 can name a path directory for us to use, including trailing
h3. delete
-Delete an existing Container.
+Delete a Container.
+
+This API requires admin privileges. In normal operation, it should not be used at all. API clients like Workbench might not work correctly when a container request references a container that has been deleted.
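For example, an admin could issue the underlying HTTP request directly; assuming ARVADOS_API_HOST and an admin-level ARVADOS_API_TOKEN are set, and using a placeholder container UUID:

curl -X DELETE \
     -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
     "https://$ARVADOS_API_HOST/arvados/v1/containers/zzzzz-dz642-0123456789abcde"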
Arguments:
Driver: S3
DriverParameters:
- # The credentials to use to access the bucket.
- AccessKey: aaaaa
- SecretKey: aaaaa
+ # IAM role name to use when retrieving credentials from
+ # instance metadata. It can be omitted, in which case the
+ # role name itself will be retrieved from instance metadata
+ # -- but setting it explicitly may protect you from using
+ # the wrong credentials in the event of an
+ # installation/configuration error.
+ IAMRole: ""
+
+ # If you are not using an IAM role for authentication,
+ # specify access credentials here instead.
+ AccessKey: ""
+ SecretKey: ""
# Storage provider endpoint. For Amazon S3, use "" or
# omit. For Google Cloud Storage, use
# for s3 driver -- see
# https://doc.arvados.org/install/configure-s3-object-storage.html
+ IAMRole: aaaaa
AccessKey: aaaaa
SecretKey: aaaaa
Endpoint: ""
# for s3 driver -- see
# https://doc.arvados.org/install/configure-s3-object-storage.html
+ IAMRole: aaaaa
AccessKey: aaaaa
SecretKey: aaaaa
Endpoint: ""
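As a sketch (placeholder role, region, and bucket names), a volume that relies on the instance's IAM role names the role in DriverParameters and leaves the static credentials blank:

      Driver: S3
      DriverParameters:
        # Placeholder values. Leaving AccessKey and SecretKey blank
        # makes keepstore fetch temporary credentials from the
        # instance metadata service.
        IAMRole: keepstore-example-role
        AccessKey: ""
        SecretKey: ""
        Endpoint: ""
        Region: us-east-1
        Bucket: example-keep-bucket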
})
ctx := ctxlog.Context(c.ctx, logger)
- listenURL, err := getListenAddr(cluster.Services, c.svcName)
+ listenURL, err := getListenAddr(cluster.Services, c.svcName, log)
if err != nil {
return 1
}
const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-func getListenAddr(svcs arvados.Services, prog arvados.ServiceName) (arvados.URL, error) {
+func getListenAddr(svcs arvados.Services, prog arvados.ServiceName, log logrus.FieldLogger) (arvados.URL, error) {
svc, ok := svcs.Map()[prog]
if !ok {
return arvados.URL{}, fmt.Errorf("unknown service name %q", prog)
if err == nil {
listener.Close()
return url, nil
+ } else if strings.Contains(err.Error(), "cannot assign requested address") {
+ continue
+ } else if strings.Contains(err.Error(), "address already in use") {
+ return url, err
+ } else {
+ log.Warn(err)
}
}
return arvados.URL{}, fmt.Errorf("configuration does not enable the %s service on this host", prog)
s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
s.add_runtime_dependency 'oj', '~> 3.0'
s.add_runtime_dependency 'curb', '~> 0.8'
+ # arvados-google-api-client 0.8.7.2 is incompatible with faraday 0.16.2
+ s.add_dependency('faraday', '< 0.16')
s.homepage =
'https://arvados.org'
end
manifest_text: foo_manifest
}.to_json)
end
- assert /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)
+ assert(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out))
assert_equal '', err
end
assert_arv('--format', 'uuid',
'collection', 'create', '--collection', tempfile.path)
end
- assert /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)
+ assert(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out))
assert_equal '', err
ensure
tempfile.unlink
def create_arv_object_with_value(value)
out, err = capture_subprocess_io do
system("arv", "tag", "add", value, "--object", "testing")
- assert $?.success?, "Command failure running `arv tag`: #{$?}"
end
assert_equal '', err
+ assert $?.success?, "Command failure running `arv tag`: #{$?}"
assert_operator 0, :<, out.strip.length
out.strip
end
assert_arv_get false
end
assert_equal '', out
- assert_match /^usage:/, err
+ assert_match(/^usage:/, err)
end
def test_get_version
end
$stderr.write err
assert_equal '', err
- assert_match /^usage:/, out
+ assert_match(/^usage:/, out)
end
def test_file_to_dev_stdout
out, err = capture_subprocess_io do
assert_arv_get false, @@foo_manifest_locator + '/foo', 'tmp/foo'
end
- assert_match /Local file tmp\/foo already exists/, err
+ assert_match(/Local file tmp\/foo already exists/, err)
assert_equal '', out
assert_equal 'baz', IO.read('tmp/foo')
end
out, err = capture_subprocess_io do
assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/'
end
- assert_match /Local file tmp\/foo already exists/, err
+ assert_match(/Local file tmp\/foo already exists/, err)
assert_equal '', out
assert_equal 'baz', IO.read('tmp/foo')
end
assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/foo'
end
assert_equal '', out
- assert_match /^usage:/, err
+ assert_match(/^usage:/, err)
end
def test_dir_to_empty_string
assert_arv_get false, @@foo_manifest_locator + '/', ''
end
assert_equal '', out
- assert_match /^usage:/, err
+ assert_match(/^usage:/, err)
end
def test_nonexistent_block
assert_arv_get false, 'e796ab2294f3e48ec709ffa8d6daf58c'
end
assert_equal '', out
- assert_match /ERROR:/, err
+ assert_match(/ERROR:/, err)
end
def test_nonexistent_manifest
assert_arv_get false, 'acbd18db4cc2f85cedef654fccc4a4d8/', 'tmp/'
end
assert_equal '', out
- assert_match /ERROR:/, err
+ assert_match(/ERROR:/, err)
end
def test_manifest_root_to_dir
end
$stderr.write err
assert_empty err
- assert_match /^usage:/, out
+ assert_match(/^usage:/, out)
end
def test_raw_stdin
assert_equal(false, arv_put('--filename', 'foo', './tmp/empty_dir/.'),
'arv-put --filename refuses directory')
end
- assert_match /^usage:.*error:/m, err
+ assert_match(/^usage:.*error:/m, err)
assert_empty out
end
'./tmp/empty_file'),
'arv-put --filename refuses directory')
end
- assert_match /^usage:.*error:/m, err
+ assert_match(/^usage:.*error:/m, err)
assert_empty out
end
out, err = capture_subprocess_io do
assert arv_put('--no-cache', '--manifest', '--progress', './tmp/foo')
end
- assert_match /%/, err
+ assert_match(/%/, err)
assert match_collection_uuid(out)
end
out, err = capture_subprocess_io do
assert arv_put('--no-cache', '--manifest', '--batch-progress', './tmp/foo')
end
- assert_match /: 0 written 3 total/, err
- assert_match /: 3 written 3 total/, err
+ assert_match(/: 0 written 3 total/, err)
+ assert_match(/: 3 written 3 total/, err)
assert match_collection_uuid(out)
end
arv_put('--progress', '--batch-progress', './tmp/foo'),
'arv-put --progress --batch-progress is contradictory')
end
- assert_match /^usage:.*error:/m, err
+ assert_match(/^usage:.*error:/m, err)
assert_empty out
end
assert_equal false, arv_tag
end
assert_empty out
- assert_match /^usage:/i, err
+ assert_match(/^usage:/i, err)
end
# Test adding and removing a single tag on a single object.
end
def test_arv_ws_get_help
- out, err = capture_subprocess_io do
+ _, err = capture_subprocess_io do
system ('arv-ws -h')
end
assert_equal '', err
end
def test_arv_ws_such_option
- out, err = capture_subprocess_io do
+ _, err = capture_subprocess_io do
system ('arv-ws --junk')
end
refute_equal '', err
hints:
- class: arv:RunInSingleContainer
- class: ResourceRequirement
- ramMin: $(inputs.count*32)
+ ramMin: $(96+inputs.count*32)
- class: arv:APIRequirement
scatter: count
run:
type: int
script: File
outputs: []
- arguments: [python, $(inputs.script), $(inputs.count * 32)]
+ arguments: [python, $(inputs.script), $(96+inputs.count * 32)]
outputs: []
hints:
- class: ResourceRequirement
- ramMin: $(inputs.count*32)
+ ramMin: $(96+inputs.count*32)
steps:
sleep1:
in:
type: int
script: File
outputs: []
- arguments: [python, $(inputs.script), $(inputs.count * 32)]
+ arguments: [python, $(inputs.script), $(96+inputs.count * 32)]
id: subtool
hints:
- class: ResourceRequirement
- ramMin: $(inputs.count*32)
+ ramMin: $(96+inputs.count*32)
inputs:
count:
type: int
script: File
outputs: []
- arguments: [python, $(inputs.script), $(inputs.count * 32)]
+ arguments: [python, $(inputs.script), $(96+inputs.count * 32)]
id: subtool
hints:
- class: ResourceRequirement
- ramMin: 32
+ ramMin: 128
inputs:
count:
type: int
script: File
outputs: []
- arguments: [python, $(inputs.script), "32"]
+ arguments: [python, $(inputs.script), "128"]
# work around undeclared dependency on i18n in some activesupport 3.x.x:
s.add_dependency('i18n', '~> 0')
s.add_dependency('json', '>= 1.7.7', '<3')
+ # arvados-google-api-client 0.8.7.2 is incompatible with faraday 0.16.2
+ s.add_dependency('faraday', '< 0.16')
s.add_runtime_dependency('jwt', '<2', '>= 0.1.5')
s.homepage =
'https://arvados.org'
current_user.andand.is_admin
end
+ def permission_to_destroy
+ current_user.andand.is_admin
+ end
+
def ensure_owner_uuid_is_permitted
# validate_change ensures owner_uuid can't be changed at all --
# except during create, which requires admin privileges. Checking
end
end
+ test "user cannot delete" do
+ set_user_from_auth :active
+ c, _ = minimal_new
+ assert_raises ArvadosModel::PermissionDeniedError do
+ c.destroy
+ end
+ assert Container.find_by_uuid(c.uuid)
+ end
+
[
{state: Container::Complete, exit_code: 0, output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'},
{state: Container::Cancelled},
func newAzureBlobVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
v := &AzureBlobVolume{
- StorageBaseURL: storage.DefaultBaseURL,
RequestTimeout: azureDefaultRequestTimeout,
WriteRaceInterval: azureDefaultWriteRaceInterval,
WriteRacePollTime: azureDefaultWriteRacePollTime,
if v.ListBlobsMaxAttempts == 0 {
v.ListBlobsMaxAttempts = azureDefaultListBlobsMaxAttempts
}
+ if v.StorageBaseURL == "" {
+ v.StorageBaseURL = storage.DefaultBaseURL
+ }
if v.ContainerName == "" || v.StorageAccountName == "" || v.StorageAccountKey == "" {
return nil, errors.New("DriverParameters: ContainerName, StorageAccountName, and StorageAccountKey must be provided")
}
package main
import (
+ "bufio"
"bytes"
"context"
"crypto/sha256"
}
func (v *S3Volume) check() error {
- if v.Bucket == "" || v.AccessKey == "" || v.SecretKey == "" {
- return errors.New("DriverParameters: Bucket, AccessKey, and SecretKey must be provided")
+ if v.Bucket == "" {
+ return errors.New("DriverParameters: Bucket must be provided")
}
if v.IndexPageSize == 0 {
v.IndexPageSize = 1000
return errors.New("DriverParameters: RaceWindow must not be negative")
}
- region, ok := aws.Regions[v.Region]
+ var ok bool
+ v.region, ok = aws.Regions[v.Region]
if v.Endpoint == "" {
if !ok {
return fmt.Errorf("unrecognized region %+q; try specifying endpoint instead", v.Region)
return fmt.Errorf("refusing to use AWS region name %+q with endpoint %+q; "+
"specify empty endpoint or use a different region name", v.Region, v.Endpoint)
} else {
- region = aws.Region{
+ v.region = aws.Region{
Name: v.Region,
S3Endpoint: v.Endpoint,
S3LocationConstraint: v.LocationConstraint,
}
}
- auth := aws.Auth{
- AccessKey: v.AccessKey,
- SecretKey: v.SecretKey,
- }
-
// Zero timeouts mean "wait forever", which is a bad
// default. Default to long timeouts instead.
if v.ConnectTimeout == 0 {
v.ReadTimeout = s3DefaultReadTimeout
}
- client := s3.New(auth, region)
- if region.EC2Endpoint.Signer == aws.V4Signature {
- // Currently affects only eu-central-1
- client.Signature = aws.V4Signature
- }
- client.ConnectTimeout = time.Duration(v.ConnectTimeout)
- client.ReadTimeout = time.Duration(v.ReadTimeout)
v.bucket = &s3bucket{
- Bucket: &s3.Bucket{
- S3: client,
+ bucket: &s3.Bucket{
+ S3: v.newS3Client(),
Name: v.Bucket,
},
}
lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
+ err := v.bootstrapIAMCredentials()
+ if err != nil {
+ return fmt.Errorf("error getting IAM credentials: %s", err)
+ }
+
return nil
}
type S3Volume struct {
AccessKey string
SecretKey string
+ AuthToken string // populated automatically when IAMRole is used
+ AuthExpiration time.Time // populated automatically when IAMRole is used
+ IAMRole string
Endpoint string
Region string
Bucket string
logger logrus.FieldLogger
metrics *volumeMetricsVecs
bucket *s3bucket
+ region aws.Region
startOnce sync.Once
}
return "s3://" + v.Endpoint + "/" + v.Bucket
}
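+// bootstrapIAMCredentials fetches initial credentials from the EC2
+// instance metadata service when AccessKey/SecretKey are not
+// configured, and starts a goroutine that refreshes them before they
+// expire.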
+func (v *S3Volume) bootstrapIAMCredentials() error {
+ if v.AccessKey != "" || v.SecretKey != "" {
+ if v.IAMRole != "" {
+ return errors.New("invalid DriverParameters: AccessKey and SecretKey must be blank if IAMRole is specified")
+ }
+ return nil
+ }
+ ttl, err := v.updateIAMCredentials()
+ if err != nil {
+ return err
+ }
+ go func() {
+ for {
+ time.Sleep(ttl)
+ ttl, err = v.updateIAMCredentials()
+ if err != nil {
+ v.logger.WithError(err).Warnf("failed to update credentials for IAM role %q", v.IAMRole)
+ ttl = time.Second
+ } else if ttl < time.Second {
+ v.logger.WithField("TTL", ttl).Warnf("received stale credentials for IAM role %q", v.IAMRole)
+ ttl = time.Second
+ }
+ }
+ }()
+ return nil
+}
+
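+// newS3Client builds an s3.S3 client from the volume's current
+// credentials (AccessKey, SecretKey, AuthToken) and region settings.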
+func (v *S3Volume) newS3Client() *s3.S3 {
+ auth := aws.NewAuth(v.AccessKey, v.SecretKey, v.AuthToken, v.AuthExpiration)
+ client := s3.New(*auth, v.region)
+ if v.region.EC2Endpoint.Signer == aws.V4Signature {
+ // Currently affects only eu-central-1
+ client.Signature = aws.V4Signature
+ }
+ client.ConnectTimeout = time.Duration(v.ConnectTimeout)
+ client.ReadTimeout = time.Duration(v.ReadTimeout)
+ return client
+}
+
+// iamCredentials is the data structure returned by the AWS metadata
+// endpoint .../security-credentials/${rolename}.
+type iamCredentials struct {
+ Code string
+ LastUpdated time.Time
+ Type string
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+ Expiration time.Time
+}
+
+// Returns TTL of updated credentials, i.e., time to sleep until next
+// update.
+func (v *S3Volume) updateIAMCredentials() (time.Duration, error) {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
+ defer cancel()
+
+ metadataBaseURL := "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
+
+ var url string
+ if strings.Contains(v.IAMRole, "://") {
+ // Configuration provides complete URL (used by tests)
+ url = v.IAMRole
+ } else if v.IAMRole != "" {
+ // Configuration provides IAM role name and we use the
+ // AWS metadata endpoint
+ url = metadataBaseURL + v.IAMRole
+ } else {
+ url = metadataBaseURL
+ v.logger.WithField("URL", url).Debug("looking up IAM role name")
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return 0, fmt.Errorf("error setting up request %s: %s", url, err)
+ }
+ resp, err := http.DefaultClient.Do(req.WithContext(ctx))
+ if err != nil {
+ return 0, fmt.Errorf("error getting %s: %s", url, err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotFound {
+ return 0, fmt.Errorf("this instance does not have an IAM role assigned -- either assign a role, or configure AccessKey and SecretKey explicitly in DriverParameters (error getting %s: HTTP status %s)", url, resp.Status)
+ } else if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
+ }
+ body := bufio.NewReader(resp.Body)
+ var role string
+ _, err = fmt.Fscanf(body, "%s\n", &role)
+ if err != nil {
+ return 0, fmt.Errorf("error reading response from %s: %s", url, err)
+ }
+ if n, _ := body.Read(make([]byte, 64)); n > 0 {
+ v.logger.Warnf("ignoring additional data returned by metadata endpoint %s after the single role name that we expected", url)
+ }
+ v.logger.WithField("Role", role).Debug("looked up IAM role name")
+ url = url + role
+ }
+
+ v.logger.WithField("URL", url).Debug("getting credentials")
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return 0, fmt.Errorf("error setting up request %s: %s", url, err)
+ }
+ resp, err := http.DefaultClient.Do(req.WithContext(ctx))
+ if err != nil {
+ return 0, fmt.Errorf("error getting %s: %s", url, err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("error getting %s: HTTP status %s", url, resp.Status)
+ }
+ var cred iamCredentials
+ err = json.NewDecoder(resp.Body).Decode(&cred)
+ if err != nil {
+ return 0, fmt.Errorf("error decoding credentials from %s: %s", url, err)
+ }
+ v.AccessKey, v.SecretKey, v.AuthToken, v.AuthExpiration = cred.AccessKeyID, cred.SecretAccessKey, cred.Token, cred.Expiration
+ v.bucket.SetBucket(&s3.Bucket{
+ S3: v.newS3Client(),
+ Name: v.Bucket,
+ })
+ // TTL is time from now to expiration, minus 5m. "We make new
+ // credentials available at least five minutes before the
+ // expiration of the old credentials." --
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
+ // (If that's not true, the returned ttl might be zero or
+ // negative, which the caller can handle.)
+ ttl := cred.Expiration.Sub(time.Now()) - 5*time.Minute
+ v.logger.WithFields(logrus.Fields{
+ "AccessKeyID": cred.AccessKeyID,
+ "LastUpdated": cred.LastUpdated,
+ "Expiration": cred.Expiration,
+ "TTL": arvados.Duration(ttl),
+ }).Debug("updated credentials")
+ return ttl, nil
+}
+
func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io.ReadCloser, err error) {
ready := make(chan bool)
go func() {
func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
// Use a merge sort to find matching sets of X and recent/X.
dataL := s3Lister{
- Bucket: v.bucket.Bucket,
+ Bucket: v.bucket.Bucket(),
Prefix: prefix,
PageSize: v.IndexPageSize,
Stats: &v.bucket.stats,
}
recentL := s3Lister{
- Bucket: v.bucket.Bucket,
+ Bucket: v.bucket.Bucket(),
Prefix: "recent/" + prefix,
PageSize: v.IndexPageSize,
Stats: &v.bucket.stats,
// (PutCopy returns 200 OK if the request was received, even if the
// copy failed).
func (v *S3Volume) safeCopy(dst, src string) error {
- resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
+ resp, err := v.bucket.Bucket().PutCopy(dst, s3ACL, s3.CopyOptions{
ContentType: "application/octet-stream",
MetadataDirective: "REPLACE",
- }, v.bucket.Name+"/"+src)
+ }, v.bucket.Bucket().Name+"/"+src)
err = v.translateError(err)
if os.IsNotExist(err) {
return err
} else if err != nil {
- return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.Name+"/"+src, err)
+ return fmt.Errorf("PutCopy(%q ← %q): %s", dst, v.bucket.Bucket().Name+"/"+src, err)
}
if t, err := time.Parse(time.RFC3339Nano, resp.LastModified); err != nil {
return fmt.Errorf("PutCopy succeeded but did not return a timestamp: %q: %s", resp.LastModified, err)
}
trashL := s3Lister{
- Bucket: v.bucket.Bucket,
+ Bucket: v.bucket.Bucket(),
Prefix: "trash/",
PageSize: v.IndexPageSize,
Stats: &v.bucket.stats,
return
}
-// s3bucket wraps s3.bucket and counts I/O and API usage stats.
+// s3bucket wraps s3.Bucket and counts I/O and API usage stats. The
+// wrapped bucket can be replaced atomically with SetBucket in order
+// to update credentials.
type s3bucket struct {
- *s3.Bucket
- stats s3bucketStats
+ bucket *s3.Bucket
+ stats s3bucketStats
+ mu sync.Mutex
+}
+
+func (b *s3bucket) Bucket() *s3.Bucket {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.bucket
+}
+
+func (b *s3bucket) SetBucket(bucket *s3.Bucket) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.bucket = bucket
}
func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
- rdr, err := b.Bucket.GetReader(path)
+ rdr, err := b.Bucket().GetReader(path)
b.stats.TickOps("get")
b.stats.Tick(&b.stats.Ops, &b.stats.GetOps)
b.stats.TickErr(err)
}
func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
- resp, err := b.Bucket.Head(path, headers)
+ resp, err := b.Bucket().Head(path, headers)
b.stats.TickOps("head")
b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps)
b.stats.TickErr(err)
} else {
r = NewCountingReader(r, b.stats.TickOutBytes)
}
- err := b.Bucket.PutReader(path, r, length, contType, perm, options)
+ err := b.Bucket().PutReader(path, r, length, contType, perm, options)
b.stats.TickOps("put")
b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
b.stats.TickErr(err)
}
func (b *s3bucket) Del(path string) error {
- err := b.Bucket.Del(path)
+ err := b.Bucket().Del(path)
b.stats.TickOps("delete")
b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
b.stats.TickErr(err)
"crypto/md5"
"encoding/json"
"fmt"
+ "io"
"log"
"net/http"
"net/http/httptest"
type StubbedS3Suite struct {
s3server *httptest.Server
+ metadata *httptest.Server
cluster *arvados.Cluster
handler *handler
volumes []*TestableS3Volume
func (s *StubbedS3Suite) SetUpTest(c *check.C) {
s.s3server = nil
+ s.metadata = nil
s.cluster = testCluster(c)
s.cluster.Volumes = map[string]arvados.Volume{
"zzzzz-nyw5e-000000000000000": {Driver: "S3"},
}
}
+func (s *StubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
+ s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ upd := time.Now().UTC().Add(-time.Hour).Format(time.RFC3339)
+ exp := time.Now().UTC().Add(time.Hour).Format(time.RFC3339)
+ // Literal example from
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
+ // but with updated timestamps
+ io.WriteString(w, `{"Code":"Success","LastUpdated":"`+upd+`","Type":"AWS-HMAC","AccessKeyId":"ASIAIOSFODNN7EXAMPLE","SecretAccessKey":"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY","Token":"token","Expiration":"`+exp+`"}`)
+ }))
+ defer s.metadata.Close()
+
+ v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+ c.Check(v.AccessKey, check.Equals, "ASIAIOSFODNN7EXAMPLE")
+ c.Check(v.SecretKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
+ c.Check(v.bucket.bucket.S3.Auth.AccessKey, check.Equals, "ASIAIOSFODNN7EXAMPLE")
+ c.Check(v.bucket.bucket.S3.Auth.SecretKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
+
+ s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ deadv := &S3Volume{
+ IAMRole: s.metadata.URL + "/fake-metadata/test-role",
+ Endpoint: "http://localhost:12345",
+ Region: "test-region-1",
+ Bucket: "test-bucket-name",
+ cluster: s.cluster,
+ logger: ctxlog.TestLogger(c),
+ metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
+ }
+ err := deadv.check()
+ c.Check(err, check.ErrorMatches, `.*/fake-metadata/test-role.*`)
+ c.Check(err, check.ErrorMatches, `.*404.*`)
+}
+
func (s *StubbedS3Suite) TestStats(c *check.C) {
v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
stats := func() string {
return
}
v.serverClock.now = &t
- v.bucket.Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
+ v.bucket.Bucket().Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
}
t0 := time.Now()
endpoint = s.s3server.URL
}
+ iamRole, accessKey, secretKey := "", "xxx", "xxx"
+ if s.metadata != nil {
+ iamRole, accessKey, secretKey = s.metadata.URL+"/fake-metadata/test-role", "", ""
+ }
+
v := &TestableS3Volume{
S3Volume: &S3Volume{
- AccessKey: "xxx",
- SecretKey: "xxx",
+ AccessKey: accessKey,
+ SecretKey: secretKey,
+ IAMRole: iamRole,
Bucket: TestBucketName,
Endpoint: endpoint,
Region: "test-region-1",
serverClock: clock,
}
c.Assert(v.S3Volume.check(), check.IsNil)
- c.Assert(v.bucket.PutBucket(s3.ACL("private")), check.IsNil)
+ c.Assert(v.bucket.Bucket().PutBucket(s3.ACL("private")), check.IsNil)
// We couldn't set RaceWindow until now because check()
// rejects negative values.
v.S3Volume.RaceWindow = arvados.Duration(raceWindow)
// PutRaw skips the ContentMD5 test
func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
- err := v.bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
+ err := v.bucket.Bucket().Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
if err != nil {
log.Printf("PutRaw: %s: %+v", loc, err)
}
- err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+ err = v.bucket.Bucket().Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
if err != nil {
log.Printf("PutRaw: recent/%s: %+v", loc, err)
}
// while we do this.
func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
v.serverClock.now = &lastPut
- err := v.bucket.Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
+ err := v.bucket.Bucket().Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
if err != nil {
panic(err)
}
PATH
remote: .
specs:
- arvados-login-sync (1.4.0.20190729193732)
+ arvados-login-sync (1.4.1.20190930204434)
arvados (~> 1.3.0, >= 1.3.0)
+ faraday (< 0.16)
GEM
remote: https://rubygems.org/
i18n (>= 0.7, < 2)
minitest (~> 5.1)
tzinfo (~> 1.1)
- addressable (2.6.0)
- public_suffix (>= 2.0.2, < 4.0)
+ addressable (2.7.0)
+ public_suffix (>= 2.0.2, < 5.0)
andand (1.3.3)
arvados (1.3.3.20190320201707)
activesupport (>= 3)
extlib (0.9.16)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
- googleauth (0.8.1)
+ googleauth (0.9.0)
faraday (~> 0.12)
jwt (>= 1.4, < 3.0)
memoist (~> 0.16)
multi_json (1.13.1)
multipart-post (2.1.1)
os (1.0.1)
- public_suffix (3.1.1)
+ public_suffix (4.0.1)
rake (12.3.2)
retriable (1.4.1)
signet (0.11.0)
s.executables << "arvados-login-sync"
s.required_ruby_version = '>= 2.1.0'
s.add_runtime_dependency 'arvados', '~> 1.3.0', '>= 1.3.0'
+ # arvados-google-api-client 0.8.7.2 is incompatible with faraday 0.16.2
+ s.add_dependency('faraday', '< 0.16')
s.homepage =
'https://arvados.org'
end
cd /usr/src/composer
-npm -d install --prefix /usr/local --global yarn
+npm -d install --prefix /usr/local --global yarn@1.17.3
yarn install
cd /usr/src/workbench2
-npm -d install --prefix /usr/local --global yarn
+npm -d install --prefix /usr/local --global yarn@1.17.3
yarn install