X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/2e104941dbf1e4bf92e0632cadeb946be0595d67..7407f41105f8000bb3908d41a31daaf3a30d9440:/services/keepstore/s3_volume_test.go

diff --git a/services/keepstore/s3_volume_test.go b/services/keepstore/s3_volume_test.go
index c43b85b1c5..2736f00b74 100644
--- a/services/keepstore/s3_volume_test.go
+++ b/services/keepstore/s3_volume_test.go
@@ -1,3 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
 package main
 
 import (
@@ -6,16 +10,19 @@ import (
 	"crypto/md5"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/http/httptest"
 	"os"
+	"strings"
 	"time"
 
-	"git.curoverse.com/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
 	"github.com/AdRoll/goamz/s3"
 	"github.com/AdRoll/goamz/s3/s3test"
-	log "github.com/Sirupsen/logrus"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/sirupsen/logrus"
 	check "gopkg.in/check.v1"
 )
 
@@ -34,34 +41,43 @@ func (c *fakeClock) Now() time.Time {
 	return *c.now
 }
 
-func init() {
-	// Deleting isn't safe from races, but if it's turned on
-	// anyway we do expect it to pass the generic volume tests.
-	s3UnsafeDelete = true
-}
-
 var _ = check.Suite(&StubbedS3Suite{})
 
 type StubbedS3Suite struct {
-	volumes []*TestableS3Volume
+	s3server *httptest.Server
+	metadata *httptest.Server
+	cluster  *arvados.Cluster
+	handler  *handler
+	volumes  []*TestableS3Volume
+}
+
+func (s *StubbedS3Suite) SetUpTest(c *check.C) {
+	s.s3server = nil
+	s.metadata = nil
+	s.cluster = testCluster(c)
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-000000000000000": {Driver: "S3"},
+		"zzzzz-nyw5e-111111111111111": {Driver: "S3"},
+	}
+	s.handler = &handler{}
 }
 
 func (s *StubbedS3Suite) TestGeneric(c *check.C) {
-	DoGenericVolumeTests(c, func(t TB) TestableVolume {
+	DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
 		// Use a negative raceWindow so s3test's 1-second
 		// timestamp precision doesn't confuse fixRace.
-		return s.newTestableVolume(c, -2*time.Second, false, 2)
+		return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
 	})
 }
 
 func (s *StubbedS3Suite) TestGenericReadOnly(c *check.C) {
-	DoGenericVolumeTests(c, func(t TB) TestableVolume {
-		return s.newTestableVolume(c, -2*time.Second, true, 2)
+	DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
+		return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
 	})
 }
 
 func (s *StubbedS3Suite) TestIndex(c *check.C) {
-	v := s.newTestableVolume(c, 0, false, 2)
+	v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 0)
 	v.IndexPageSize = 3
 	for i := 0; i < 256; i++ {
 		v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
@@ -85,8 +101,91 @@ func (s *StubbedS3Suite) TestIndex(c *check.C) {
 	}
 }
 
+func (s *StubbedS3Suite) TestSignatureVersion(c *check.C) {
+	var header http.Header
+	stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		header = r.Header
+	}))
+	defer stub.Close()
+
+	// Default V4 signature
+	vol := S3Volume{
+		S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+			AccessKey: "xxx",
+			SecretKey: "xxx",
+			Endpoint:  stub.URL,
+			Region:    "test-region-1",
+			Bucket:    "test-bucket-name",
+		},
+		cluster: s.cluster,
+		logger:  ctxlog.TestLogger(c),
+		metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
+	}
+	err := vol.check()
+	c.Check(err, check.IsNil)
+	err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
+	c.Check(err, check.IsNil)
+	c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
+
+	// Force V2 signature
+	vol = S3Volume{
+		S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+			AccessKey:   "xxx",
+			SecretKey:   "xxx",
+			Endpoint:    stub.URL,
+			Region:      "test-region-1",
+			Bucket:      "test-bucket-name",
+			V2Signature: true,
+		},
+		cluster: s.cluster,
+		logger:  ctxlog.TestLogger(c),
+		metrics: newVolumeMetricsVecs(prometheus.NewRegistry()),
+	}
+	err = vol.check()
+	c.Check(err, check.IsNil)
+	err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
+	c.Check(err, check.IsNil)
+	c.Check(header.Get("Authorization"), check.Matches, `AWS xxx:.*`)
+}
+
+func (s *StubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
+	s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		upd := time.Now().UTC().Add(-time.Hour).Format(time.RFC3339)
+		exp := time.Now().UTC().Add(time.Hour).Format(time.RFC3339)
+		// Literal example from
+		// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
+		// but with updated timestamps
+		io.WriteString(w, `{"Code":"Success","LastUpdated":"`+upd+`","Type":"AWS-HMAC","AccessKeyId":"ASIAIOSFODNN7EXAMPLE","SecretAccessKey":"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY","Token":"token","Expiration":"`+exp+`"}`)
	}))
+	defer s.metadata.Close()
+
+	v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+	c.Check(v.AccessKey, check.Equals, "ASIAIOSFODNN7EXAMPLE")
+	c.Check(v.SecretKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
+	c.Check(v.bucket.bucket.S3.Auth.AccessKey, check.Equals, "ASIAIOSFODNN7EXAMPLE")
+	c.Check(v.bucket.bucket.S3.Auth.SecretKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY") + + s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + deadv := &S3Volume{ + S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{ + IAMRole: s.metadata.URL + "/fake-metadata/test-role", + Endpoint: "http://localhost:12345", + Region: "test-region-1", + Bucket: "test-bucket-name", + }, + cluster: s.cluster, + logger: ctxlog.TestLogger(c), + metrics: newVolumeMetricsVecs(prometheus.NewRegistry()), + } + err := deadv.check() + c.Check(err, check.ErrorMatches, `.*/fake-metadata/test-role.*`) + c.Check(err, check.ErrorMatches, `.*404.*`) +} + func (s *StubbedS3Suite) TestStats(c *check.C) { - v := s.newTestableVolume(c, 5*time.Minute, false, 2) + v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute) stats := func() string { buf, err := json.Marshal(v.InternalStats()) c.Check(err, check.IsNil) @@ -120,6 +219,11 @@ type blockingHandler struct { } func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method == "PUT" && !strings.Contains(strings.Trim(r.URL.Path, "/"), "/") { + // Accept PutBucket ("PUT /bucketname/"), called by + // newTestableVolume + return + } if h.requested != nil { h.requested <- r } @@ -159,14 +263,10 @@ func (s *StubbedS3Suite) TestPutContextCancel(c *check.C) { func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3Volume) error) { handler := &blockingHandler{} - srv := httptest.NewServer(handler) - defer srv.Close() + s.s3server = httptest.NewServer(handler) + defer s.s3server.Close() - v := s.newTestableVolume(c, 5*time.Minute, false, 2) - vol := *v.S3Volume - vol.Endpoint = srv.URL - v = &TestableS3Volume{S3Volume: &vol} - v.Start() + v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute) ctx, cancel := context.WithCancel(context.Background()) @@ -203,14 +303,10 @@ func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Con } func (s *StubbedS3Suite) TestBackendStates(c *check.C) { - defer func(tl, bs arvados.Duration) { - theConfig.TrashLifetime = tl - theConfig.BlobSignatureTTL = bs - }(theConfig.TrashLifetime, theConfig.BlobSignatureTTL) - theConfig.TrashLifetime.Set("1h") - theConfig.BlobSignatureTTL.Set("1h") - - v := s.newTestableVolume(c, 5*time.Minute, false, 2) + s.cluster.Collections.BlobTrashLifetime.Set("1h") + s.cluster.Collections.BlobSigningTTL.Set("1h") + + v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute) var none time.Time putS3Obj := func(t time.Time, key string, data []byte) { @@ -218,7 +314,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) { return } v.serverClock.now = &t - v.bucket.Put(key, data, "application/octet-stream", s3ACL, s3.Options{}) + v.bucket.Bucket().Put(key, data, "application/octet-stream", s3ACL, s3.Options{}) } t0 := time.Now() @@ -305,12 +401,12 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) { false, false, false, true, false, false, }, { - "Erroneously trashed during a race, detected before TrashLifetime", + "Erroneously trashed during a race, detected before BlobTrashLifetime", none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute), true, false, true, true, true, false, }, { - "Erroneously trashed during a race, rescue during 
+			"Erroneously trashed during a race, rescue during EmptyTrash despite reaching BlobTrashLifetime",
 			none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
 			true, false, true, true, true, false,
 		},
@@ -351,7 +447,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 		}
 
 		// Call Trash, then check canTrash and canGetAfterTrash
-		loc, blk = setupScenario()
+		loc, _ = setupScenario()
 		err = v.Trash(loc)
 		c.Check(err == nil, check.Equals, scenario.canTrash)
 		_, err = v.Get(context.Background(), loc, buf)
@@ -361,7 +457,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 		}
 
 		// Call Untrash, then check canUntrash
-		loc, blk = setupScenario()
+		loc, _ = setupScenario()
 		err = v.Untrash(loc)
 		c.Check(err == nil, check.Equals, scenario.canUntrash)
 		if scenario.dataT != none || scenario.trashT != none {
@@ -375,7 +471,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 
 		// Call EmptyTrash, then check haveTrashAfterEmpty and
 		// freshAfterEmpty
-		loc, blk = setupScenario()
+		loc, _ = setupScenario()
 		v.EmptyTrash()
 		_, err = v.bucket.Head("trash/"+loc, nil)
 		c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
@@ -405,53 +501,59 @@ type TestableS3Volume struct {
 	serverClock *fakeClock
 }
 
-func (s *StubbedS3Suite) newTestableVolume(c *check.C, raceWindow time.Duration, readonly bool, replication int) *TestableS3Volume {
+func (s *StubbedS3Suite) newTestableVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, raceWindow time.Duration) *TestableS3Volume {
 	clock := &fakeClock{}
 	srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
 	c.Assert(err, check.IsNil)
+	endpoint := srv.URL()
+	if s.s3server != nil {
+		endpoint = s.s3server.URL
+	}
+
+	iamRole, accessKey, secretKey := "", "xxx", "xxx"
+	if s.metadata != nil {
+		iamRole, accessKey, secretKey = s.metadata.URL+"/fake-metadata/test-role", "", ""
+	}
 
 	v := &TestableS3Volume{
 		S3Volume: &S3Volume{
-			Bucket:             TestBucketName,
-			Endpoint:           srv.URL(),
-			Region:             "test-region-1",
-			LocationConstraint: true,
-			RaceWindow:         arvados.Duration(raceWindow),
-			S3Replication:      replication,
-			UnsafeDelete:       s3UnsafeDelete,
-			ReadOnly:           readonly,
-			IndexPageSize:      1000,
+			S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
+				IAMRole:            iamRole,
+				AccessKey:          accessKey,
+				SecretKey:          secretKey,
+				Bucket:             TestBucketName,
+				Endpoint:           endpoint,
+				Region:             "test-region-1",
+				LocationConstraint: true,
+				UnsafeDelete:       true,
+				IndexPageSize:      1000,
+			},
+			cluster: cluster,
+			volume:  volume,
+			logger:  ctxlog.TestLogger(c),
+			metrics: metrics,
 		},
 		c:           c,
 		server:      srv,
 		serverClock: clock,
 	}
-	v.Start()
-	err = v.bucket.PutBucket(s3.ACL("private"))
-	c.Assert(err, check.IsNil)
+	c.Assert(v.S3Volume.check(), check.IsNil)
+	c.Assert(v.bucket.Bucket().PutBucket(s3.ACL("private")), check.IsNil)
+	// We couldn't set RaceWindow until now because check()
+	// rejects negative values.
+	v.S3Volume.RaceWindow = arvados.Duration(raceWindow)
 	return v
 }
 
-func (v *TestableS3Volume) Start() error {
-	tmp, err := ioutil.TempFile("", "keepstore")
-	v.c.Assert(err, check.IsNil)
-	defer os.Remove(tmp.Name())
-	_, err = tmp.Write([]byte("xxx\n"))
-	v.c.Assert(err, check.IsNil)
-	v.c.Assert(tmp.Close(), check.IsNil)
-
-	v.S3Volume.AccessKeyFile = tmp.Name()
-	v.S3Volume.SecretKeyFile = tmp.Name()
-
-	v.c.Assert(v.S3Volume.Start(), check.IsNil)
-	return nil
-}
-
 // PutRaw skips the ContentMD5 test
 func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
-	err := v.bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
+	err := v.bucket.Bucket().Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
 	if err != nil {
-		log.Printf("PutRaw: %+v", err)
+		v.logger.Printf("PutRaw: %s: %+v", loc, err)
+	}
+	err = v.bucket.Bucket().Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+	if err != nil {
+		v.logger.Printf("PutRaw: recent/%s: %+v", loc, err)
 	}
 }
 
@@ -460,7 +562,7 @@ func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
 // while we do this.
 func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
 	v.serverClock.now = &lastPut
-	err := v.bucket.Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
+	err := v.bucket.Bucket().Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
 	if err != nil {
 		panic(err)
 	}
@@ -470,3 +572,7 @@ func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
 func (v *TestableS3Volume) Teardown() {
 	v.server.Quit()
 }
+
+func (v *TestableS3Volume) ReadWriteOperationLabelValues() (r, w string) {
+	return "get", "put"
+}
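
The metadata stub in TestIAMRoleCredentials above serves the literal security-credentials document from the AWS EC2 instance-metadata documentation. For reference, a minimal consumer of that endpoint could look like the sketch below. This is an illustration only, not keepstore's actual IAMRole implementation; the metadataCredentials type, fetchCredentials function, and the "my-role" path segment are invented for the example.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// metadataCredentials mirrors the JSON document served by the EC2
// instance metadata endpoint (and by the httptest stub in
// TestIAMRoleCredentials). Field names follow the AWS document;
// the struct itself is hypothetical.
type metadataCredentials struct {
	Code            string
	LastUpdated     time.Time
	Type            string
	AccessKeyID     string `json:"AccessKeyId"`
	SecretAccessKey string
	Token           string
	Expiration      time.Time
}

// fetchCredentials retrieves and decodes a credential document from
// url -- in the test, s.metadata.URL + "/fake-metadata/test-role".
func fetchCredentials(url string) (*metadataCredentials, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// A non-200 response surfaces the status in the error text,
		// consistent with the `.*404.*` assertion in the deadv case.
		return nil, fmt.Errorf("%s: %s", url, resp.Status)
	}
	var cred metadataCredentials
	if err := json.NewDecoder(resp.Body).Decode(&cred); err != nil {
		return nil, err
	}
	return &cred, nil
}

func main() {
	// 169.254.169.254 is the real EC2 metadata address; "my-role" is
	// a placeholder role name.
	cred, err := fetchCredentials("http://169.254.169.254/latest/meta-data/iam/security-credentials/my-role")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Println(cred.AccessKeyID, "expires", cred.Expiration)
}

Decoding works because the document's timestamps are RFC3339 strings, which encoding/json unmarshals into time.Time directly -- the same property the test relies on when it regenerates LastUpdated and Expiration with time.RFC3339.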