X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/78d74f846e1b35b6b65d76c345227845d00a9722..386faadf691e444b71d6c96e7c00792d9a0ba2c7:/services/keepstore/s3_volume_test.go

diff --git a/services/keepstore/s3_volume_test.go b/services/keepstore/s3_volume_test.go
index e41e04b363..c43b85b1c5 100644
--- a/services/keepstore/s3_volume_test.go
+++ b/services/keepstore/s3_volume_test.go
@@ -2,25 +2,23 @@ package main
 
 import (
 	"bytes"
+	"context"
 	"crypto/md5"
+	"encoding/json"
 	"fmt"
-	"log"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
 	"os"
 	"time"
 
-	"github.com/AdRoll/goamz/aws"
+	"git.curoverse.com/arvados.git/sdk/go/arvados"
 	"github.com/AdRoll/goamz/s3"
 	"github.com/AdRoll/goamz/s3/s3test"
+	log "github.com/Sirupsen/logrus"
 	check "gopkg.in/check.v1"
 )
 
-type TestableS3Volume struct {
-	*S3Volume
-	server      *s3test.Server
-	c           *check.C
-	serverClock *fakeClock
-}
-
 const (
 	TestBucketName = "testbucket"
 )
@@ -42,30 +40,6 @@ func init() {
 	s3UnsafeDelete = true
 }
 
-func NewTestableS3Volume(c *check.C, raceWindow time.Duration, readonly bool, replication int) *TestableS3Volume {
-	clock := &fakeClock{}
-	srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
-	c.Assert(err, check.IsNil)
-	auth := aws.Auth{}
-	region := aws.Region{
-		Name:                 "test-region-1",
-		S3Endpoint:           srv.URL(),
-		S3LocationConstraint: true,
-	}
-	bucket := &s3.Bucket{
-		S3:   s3.New(auth, region),
-		Name: TestBucketName,
-	}
-	err = bucket.PutBucket(s3.ACL("private"))
-	c.Assert(err, check.IsNil)
-
-	return &TestableS3Volume{
-		S3Volume:    NewS3Volume(auth, region, TestBucketName, raceWindow, readonly, replication),
-		server:      srv,
-		serverClock: clock,
-	}
-}
-
 var _ = check.Suite(&StubbedS3Suite{})
 
 type StubbedS3Suite struct {
@@ -76,19 +50,19 @@ func (s *StubbedS3Suite) TestGeneric(c *check.C) {
 	DoGenericVolumeTests(c, func(t TB) TestableVolume {
 		// Use a negative raceWindow so s3test's 1-second
 		// timestamp precision doesn't confuse fixRace.
-		return NewTestableS3Volume(c, -2*time.Second, false, 2)
+		return s.newTestableVolume(c, -2*time.Second, false, 2)
 	})
 }
 
 func (s *StubbedS3Suite) TestGenericReadOnly(c *check.C) {
 	DoGenericVolumeTests(c, func(t TB) TestableVolume {
-		return NewTestableS3Volume(c, -2*time.Second, true, 2)
+		return s.newTestableVolume(c, -2*time.Second, true, 2)
 	})
 }
 
 func (s *StubbedS3Suite) TestIndex(c *check.C) {
-	v := NewTestableS3Volume(c, 0, false, 2)
-	v.indexPageSize = 3
+	v := s.newTestableVolume(c, 0, false, 2)
+	v.IndexPageSize = 3
 	for i := 0; i < 256; i++ {
 		v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
 	}
@@ -111,15 +85,132 @@ func (s *StubbedS3Suite) TestIndex(c *check.C) {
 	}
 }
 
+func (s *StubbedS3Suite) TestStats(c *check.C) {
+	v := s.newTestableVolume(c, 5*time.Minute, false, 2)
+	stats := func() string {
+		buf, err := json.Marshal(v.InternalStats())
+		c.Check(err, check.IsNil)
+		return string(buf)
+	}
+
+	c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
+
+	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+	_, err := v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.NotNil)
+	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
+	c.Check(stats(), check.Matches, `.*"\*s3.Error 404 [^"]*":[^0].*`)
+	c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
+
+	err = v.Put(context.Background(), loc, []byte("foo"))
+	c.Check(err, check.IsNil)
+	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
+	c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
+
+	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.IsNil)
+	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.IsNil)
+	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
+}
+
+type blockingHandler struct {
+	requested chan *http.Request
+	unblock   chan struct{}
+}
+
+func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if h.requested != nil {
+		h.requested <- r
+	}
+	if h.unblock != nil {
+		<-h.unblock
+	}
+	http.Error(w, "nothing here", http.StatusNotFound)
+}
+
+func (s *StubbedS3Suite) TestGetContextCancel(c *check.C) {
+	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+	buf := make([]byte, 3)
+
+	s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+		_, err := v.Get(ctx, loc, buf)
+		return err
+	})
+}
+
+func (s *StubbedS3Suite) TestCompareContextCancel(c *check.C) {
+	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+	buf := []byte("bar")
+
+	s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+		return v.Compare(ctx, loc, buf)
+	})
+}
+
+func (s *StubbedS3Suite) TestPutContextCancel(c *check.C) {
+	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+	buf := []byte("foo")
+
+	s.testContextCancel(c, func(ctx context.Context, v *TestableS3Volume) error {
+		return v.Put(ctx, loc, buf)
+	})
+}
+
+func (s *StubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3Volume) error) {
+	handler := &blockingHandler{}
+	srv := httptest.NewServer(handler)
+	defer srv.Close()
+
+	v := s.newTestableVolume(c, 5*time.Minute, false, 2)
+	vol := *v.S3Volume
+	vol.Endpoint = srv.URL
+	v = &TestableS3Volume{S3Volume: &vol}
+	v.Start()
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	handler.requested = make(chan *http.Request)
+	handler.unblock = make(chan struct{})
+	defer close(handler.unblock)
+
+	doneFunc := make(chan struct{})
+	go func() {
+		err := testFunc(ctx, v)
+		c.Check(err, check.Equals, context.Canceled)
+		close(doneFunc)
+	}()
+
+	timeout := time.After(10 * time.Second)
+
+	// Wait for the stub server to receive a request, meaning
+	// Get() is waiting for an s3 operation.
+	select {
+	case <-timeout:
+		c.Fatal("timed out waiting for test func to call our handler")
+	case <-doneFunc:
+		c.Fatal("test func finished without even calling our handler!")
+	case <-handler.requested:
+	}
+
+	cancel()
+
+	select {
+	case <-timeout:
+		c.Fatal("timed out")
+	case <-doneFunc:
+	}
+}
+
 func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
-	defer func(tl, bs time.Duration) {
-		trashLifetime = tl
-		blobSignatureTTL = bs
-	}(trashLifetime, blobSignatureTTL)
-	trashLifetime = time.Hour
-	blobSignatureTTL = time.Hour
-
-	v := NewTestableS3Volume(c, 5*time.Minute, false, 2)
+	defer func(tl, bs arvados.Duration) {
+		theConfig.TrashLifetime = tl
+		theConfig.BlobSignatureTTL = bs
+	}(theConfig.TrashLifetime, theConfig.BlobSignatureTTL)
+	theConfig.TrashLifetime.Set("1h")
+	theConfig.BlobSignatureTTL.Set("1h")
+
+	v := s.newTestableVolume(c, 5*time.Minute, false, 2)
 	var none time.Time
 
 	putS3Obj := func(t time.Time, key string, data []byte) {
@@ -127,7 +218,7 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 			return
 		}
 		v.serverClock.now = &t
-		v.Bucket.Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
+		v.bucket.Put(key, data, "application/octet-stream", s3ACL, s3.Options{})
 	}
 
 	t0 := time.Now()
@@ -214,12 +305,12 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 			false, false, false, true, false, false,
 		},
 		{
-			"Erroneously trashed during a race, detected before trashLifetime",
+			"Erroneously trashed during a race, detected before TrashLifetime",
 			none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute),
 			true, false, true, true, true, false,
 		},
 		{
-			"Erroneously trashed during a race, rescue during EmptyTrash despite reaching trashLifetime",
+			"Erroneously trashed during a race, rescue during EmptyTrash despite reaching TrashLifetime",
 			none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
 			true, false, true, true, true, false,
 		},
@@ -250,30 +341,43 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 			return loc, blk
 		}
 
+		// Check canGet
 		loc, blk := setupScenario()
 		buf := make([]byte, len(blk))
-		_, err := v.Get(loc, buf)
+		_, err := v.Get(context.Background(), loc, buf)
 		c.Check(err == nil, check.Equals, scenario.canGet)
 		if err != nil {
 			c.Check(os.IsNotExist(err), check.Equals, true)
 		}
 
+		// Call Trash, then check canTrash and canGetAfterTrash
 		loc, blk = setupScenario()
 		err = v.Trash(loc)
 		c.Check(err == nil, check.Equals, scenario.canTrash)
-		_, err = v.Get(loc, buf)
+		_, err = v.Get(context.Background(), loc, buf)
 		c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
 		if err != nil {
 			c.Check(os.IsNotExist(err), check.Equals, true)
 		}
 
+		// Call Untrash, then check canUntrash
 		loc, blk = setupScenario()
 		err = v.Untrash(loc)
 		c.Check(err == nil, check.Equals, scenario.canUntrash)
+		if scenario.dataT != none || scenario.trashT != none {
+			// In all scenarios where the data exists, we
+			// should be able to Get after Untrash --
+			// regardless of timestamps, errors, race
+			// conditions, etc.
+			_, err = v.Get(context.Background(), loc, buf)
+			c.Check(err, check.IsNil)
+		}
 
+		// Call EmptyTrash, then check haveTrashAfterEmpty and
+		// freshAfterEmpty
 		loc, blk = setupScenario()
 		v.EmptyTrash()
-		_, err = v.Bucket.Head("trash/"+loc, nil)
+		_, err = v.bucket.Head("trash/"+loc, nil)
 		c.Check(err == nil, check.Equals, scenario.haveTrashAfterEmpty)
 		if scenario.freshAfterEmpty {
 			t, err := v.Mtime(loc)
@@ -282,12 +386,70 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 			// allowance for 1s timestamp precision)
 			c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
 		}
+
+		// Check for current Mtime after Put (applies to all
+		// scenarios)
+		loc, blk = setupScenario()
+		err = v.Put(context.Background(), loc, blk)
+		c.Check(err, check.IsNil)
+		t, err := v.Mtime(loc)
+		c.Check(err, check.IsNil)
+		c.Check(t.After(t0.Add(-time.Second)), check.Equals, true)
 	}
 }
 
+type TestableS3Volume struct {
+	*S3Volume
+	server      *s3test.Server
+	c           *check.C
+	serverClock *fakeClock
+}
+
+func (s *StubbedS3Suite) newTestableVolume(c *check.C, raceWindow time.Duration, readonly bool, replication int) *TestableS3Volume {
+	clock := &fakeClock{}
+	srv, err := s3test.NewServer(&s3test.Config{Clock: clock})
+	c.Assert(err, check.IsNil)
+
+	v := &TestableS3Volume{
+		S3Volume: &S3Volume{
+			Bucket:             TestBucketName,
+			Endpoint:           srv.URL(),
+			Region:             "test-region-1",
+			LocationConstraint: true,
+			RaceWindow:         arvados.Duration(raceWindow),
+			S3Replication:      replication,
+			UnsafeDelete:       s3UnsafeDelete,
+			ReadOnly:           readonly,
+			IndexPageSize:      1000,
+		},
+		c:           c,
+		server:      srv,
+		serverClock: clock,
+	}
+	v.Start()
+	err = v.bucket.PutBucket(s3.ACL("private"))
+	c.Assert(err, check.IsNil)
+	return v
+}
+
+func (v *TestableS3Volume) Start() error {
+	tmp, err := ioutil.TempFile("", "keepstore")
+	v.c.Assert(err, check.IsNil)
+	defer os.Remove(tmp.Name())
+	_, err = tmp.Write([]byte("xxx\n"))
+	v.c.Assert(err, check.IsNil)
+	v.c.Assert(tmp.Close(), check.IsNil)
+
+	v.S3Volume.AccessKeyFile = tmp.Name()
+	v.S3Volume.SecretKeyFile = tmp.Name()
+
+	v.c.Assert(v.S3Volume.Start(), check.IsNil)
+	return nil
+}
+
 // PutRaw skips the ContentMD5 test
 func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
-	err := v.Bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
+	err := v.bucket.Put(loc, block, "application/octet-stream", s3ACL, s3.Options{})
 	if err != nil {
 		log.Printf("PutRaw: %+v", err)
 	}
@@ -298,7 +460,7 @@ func (v *TestableS3Volume) PutRaw(loc string, block []byte) {
 // while we do this.
 func (v *TestableS3Volume) TouchWithDate(locator string, lastPut time.Time) {
 	v.serverClock.now = &lastPut
-	err := v.Bucket.Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
+	err := v.bucket.Put("recent/"+locator, nil, "application/octet-stream", s3ACL, s3.Options{})
 	if err != nil {
 		panic(err)
 	}
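
The TestStats test added above asserts on operation counters by JSON-marshalling
v.InternalStats() and regexp-matching the result, so the test does not depend on
the concrete stats struct type. A rough standalone sketch of that assertion
style follows; the opStats type and tickGet helper here are hypothetical
stand-ins, not keepstore's real stats implementation:

	package main

	import (
		"encoding/json"
		"fmt"
		"regexp"
		"sync/atomic"
	)

	// opStats is a hypothetical stand-in for the struct behind
	// v.InternalStats(); the real keepstore type tracks more fields.
	type opStats struct {
		Ops      uint64
		GetOps   uint64
		InBytes  uint64
		OutBytes uint64
	}

	// tickGet records one successful read of n bytes.
	func (s *opStats) tickGet(n int) {
		atomic.AddUint64(&s.Ops, 1)
		atomic.AddUint64(&s.GetOps, 1)
		atomic.AddUint64(&s.InBytes, uint64(n))
	}

	func main() {
		stats := &opStats{}
		stats.tickGet(3)
		stats.tickGet(3)

		buf, err := json.Marshal(stats)
		if err != nil {
			panic(err)
		}
		// Equivalent in spirit to:
		//   c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
		fmt.Println(regexp.MustCompile(`"Ops":2,`).Match(buf))     // true
		fmt.Println(regexp.MustCompile(`"InBytes":6,`).Match(buf)) // true
	}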
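The testContextCancel helper added above is a reusable pattern: park a request
inside a stub HTTP handler, cancel the caller's context, and assert that the
blocked call returns context.Canceled instead of hanging. Below is a minimal,
self-contained sketch of that pattern under stated assumptions: it is not part
of the commit, the slowGet name is illustrative, and a plain net/http client
stands in for the goamz S3 client used by the real volume code:

	package main

	import (
		"context"
		"errors"
		"fmt"
		"io"
		"net/http"
		"net/http/httptest"
		"time"
	)

	// blockingHandler mimics the stub above: it announces each incoming
	// request on a channel, then blocks until unblock is closed.
	type blockingHandler struct {
		requested chan *http.Request
		unblock   chan struct{}
	}

	func (h *blockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		h.requested <- r
		<-h.unblock
		http.Error(w, "nothing here", http.StatusNotFound)
	}

	// slowGet stands in for a context-aware volume operation such as
	// v.Get(ctx, ...): the HTTP request aborts when ctx is canceled.
	func slowGet(ctx context.Context, url string) error {
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req.WithContext(ctx))
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		_, err = io.Copy(io.Discard, resp.Body)
		return err
	}

	func main() {
		handler := &blockingHandler{
			requested: make(chan *http.Request),
			unblock:   make(chan struct{}),
		}
		srv := httptest.NewServer(handler)
		defer srv.Close()
		// Let the parked handler finish before srv.Close waits on it.
		defer close(handler.unblock)

		ctx, cancel := context.WithCancel(context.Background())
		done := make(chan error, 1)
		go func() { done <- slowGet(ctx, srv.URL) }()

		// Wait until the handler is actually holding a request; only
		// then does cancellation test anything meaningful.
		<-handler.requested
		cancel()

		select {
		case err := <-done:
			fmt.Println("canceled as expected:", errors.Is(err, context.Canceled))
		case <-time.After(10 * time.Second):
			fmt.Println("timed out: slowGet ignored the canceled context")
		}
	}

As in the test above, the important ordering detail is to wait on the
requested channel before calling cancel(); canceling earlier would only prove
the client checks its context before dialing, not that an in-flight operation
can be interrupted.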