X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/d3b76845c740935f7474f535d308303c748b0b4a..d90fffe9d937d3e05c04106904b18dc4da235bc6:/services/keepstore/handler_test.go

diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
index 897447dd11..5bdafb77c2 100644
--- a/services/keepstore/handler_test.go
+++ b/services/keepstore/handler_test.go
@@ -4,14 +4,14 @@
 
 // Tests for Keep HTTP handlers:
 //
-// GetBlockHandler
-// PutBlockHandler
-// IndexHandler
+//   - GetBlockHandler
+//   - PutBlockHandler
+//   - IndexHandler
 //
 // The HTTP handlers are responsible for enforcing permission policy,
 // so these tests must exercise all possible permission permutations.
 
-package main
+package keepstore
 
 import (
 	"bytes"
@@ -23,6 +23,7 @@ import (
 	"os"
 	"sort"
 	"strings"
+	"sync/atomic"
 	"time"
 
 	"git.arvados.org/arvados.git/lib/config"
@@ -85,7 +86,6 @@ type RequestTester struct {
 //   - permissions on, unauthenticated request, signed locator
 //   - permissions on, authenticated request, expired locator
 //   - permissions on, authenticated request, signed locator, transient error from backend
-//
 func (s *HandlerSuite) TestGetHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -205,7 +205,6 @@ func (s *HandlerSuite) TestGetHandler(c *check.C) {
 //   - no server key
 //   - with server key, authenticated request, unsigned locator
 //   - with server key, unauthenticated request, unsigned locator
-//
 func (s *HandlerSuite) TestPutHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -367,6 +366,94 @@ func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
 	}
 }
 
+func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-111111111111111": {
+			Driver:         "mock",
+			Replication:    1,
+			ReadOnly:       true,
+			StorageClasses: map[string]bool{"class1": true}},
+	}
+	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+	resp := IssueRequest(s.handler,
+		&RequestTester{
+			method:         "PUT",
+			uri:            "/" + TestHash,
+			requestBody:    TestBlock,
+			storageClasses: "class1",
+		})
+	c.Check(resp.Code, check.Equals, FullError.HTTPCode)
+	c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
+}
+
+func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-111111111111111": {
+			Driver:         "mock",
+			Replication:    1,
+			StorageClasses: map[string]bool{"class1": true}},
+		"zzzzz-nyw5e-121212121212121": {
+			Driver:         "mock",
+			Replication:    1,
+			StorageClasses: map[string]bool{"class1": true, "class2": true}},
+		"zzzzz-nyw5e-222222222222222": {
+			Driver:         "mock",
+			Replication:    1,
+			StorageClasses: map[string]bool{"class2": true}},
+	}
+
+	for _, trial := range []struct {
+		setCounter uint32 // value to stuff vm.counter, to control offset
+		classes    string // desired classes
+		put111     int    // expected number of "put" ops on 11111... after 2x put reqs
+		put121     int    // expected number of "put" ops on 12121...
+		put222     int    // expected number of "put" ops on 22222...
+		cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
+		cmp121     int    // expected number of "compare" ops on 12121...
+		cmp222     int    // expected number of "compare" ops on 22222...
+	}{
+		{0, "class1",
+			1, 0, 0,
+			2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 111
+		{0, "class2",
+			0, 1, 0,
+			0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
+		{0, "class1,class2",
+			1, 1, 0,
+			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+		{1, "class1,class2",
+			0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
+			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+		{0, "class1,class2,class404",
+			1, 1, 0,
+			2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
+	} {
+		c.Logf("%+v", trial)
+		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+			"class1": {},
+			"class2": {},
+			"class3": {},
+		}
+		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+		atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
+		for i := 0; i < 2; i++ {
+			IssueRequest(s.handler,
+				&RequestTester{
+					method:         "PUT",
+					uri:            "/" + TestHash,
+					requestBody:    TestBlock,
+					storageClasses: trial.classes,
+				})
+		}
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
+	}
+}
+
 // Test TOUCH requests.
 func (s *HandlerSuite) TestTouchHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -428,7 +515,6 @@ func (s *HandlerSuite) TestTouchHandler(c *check.C) {
 //
 // The only /index requests that should succeed are those issued by the
 // superuser. They should pass regardless of the value of BlobSigning.
-//
 func (s *HandlerSuite) TestIndexHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -584,28 +670,27 @@
 //
 // Cases tested:
 //
-//   With no token and with a non-data-manager token:
-//   * Delete existing block
-//     (test for 403 Forbidden, confirm block not deleted)
+//	With no token and with a non-data-manager token:
+//	* Delete existing block
+//	  (test for 403 Forbidden, confirm block not deleted)
 //
-//   With data manager token:
+//	With data manager token:
 //
-//   * Delete existing block
-//     (test for 200 OK, response counts, confirm block deleted)
+//	* Delete existing block
+//	  (test for 200 OK, response counts, confirm block deleted)
 //
-//   * Delete nonexistent block
-//     (test for 200 OK, response counts)
+//	* Delete nonexistent block
+//	  (test for 200 OK, response counts)
 //
-//   TODO(twp):
+//	TODO(twp):
 //
-//   * Delete block on read-only and read-write volume
-//     (test for 200 OK, response with copies_deleted=1,
-//     copies_failed=1, confirm block deleted only on r/w volume)
-//
-//   * Delete block on read-only volume only
-//     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
-//     confirm block not deleted)
+//	* Delete block on read-only and read-write volume
+//	  (test for 200 OK, response with copies_deleted=1,
+//	  copies_failed=1, confirm block deleted only on r/w volume)
 //
+//	* Delete block on read-only volume only
+//	  (test for 200 OK, response with copies_deleted=0, copies_failed=1,
+//	  confirm block not deleted)
 func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -734,18 +819,18 @@ func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
 // Cases tested: syntactically valid and invalid pull lists, from the
 // data manager and from unprivileged users:
 //
-// 1. Valid pull list from an ordinary user
-//    (expected result: 401 Unauthorized)
+//  1. Valid pull list from an ordinary user
+//     (expected result: 401 Unauthorized)
 //
-// 2. Invalid pull request from an ordinary user
-//    (expected result: 401 Unauthorized)
+//  2. Invalid pull request from an ordinary user
+//     (expected result: 401 Unauthorized)
 //
-// 3. Valid pull request from the data manager
-//    (expected result: 200 OK with request body "Received 3 pull
-//    requests"
+//  3. Valid pull request from the data manager
+//     (expected result: 200 OK with request body "Received 3 pull
+//     requests"
 //
-// 4. Invalid pull request from the data manager
-//    (expected result: 400 Bad Request)
+//  4. Invalid pull request from the data manager
+//     (expected result: 400 Bad Request)
 //
 // Test that in the end, the pull manager received a good pull list with
 // the expected number of requests.
@@ -753,7 +838,6 @@ func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
 // TODO(twp): test concurrency: launch 100 goroutines to update the
 // pull list simultaneously. Make sure that none of them return 400
 // Bad Request and that pullq.GetList() returns a valid list.
-//
 func (s *HandlerSuite) TestPullHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -849,18 +933,18 @@ func (s *HandlerSuite) TestPullHandler(c *check.C) {
 // Cases tested: syntactically valid and invalid trash lists, from the
 // data manager and from unprivileged users:
 //
-// 1. Valid trash list from an ordinary user
-//    (expected result: 401 Unauthorized)
+//  1. Valid trash list from an ordinary user
+//     (expected result: 401 Unauthorized)
 //
-// 2. Invalid trash list from an ordinary user
-//    (expected result: 401 Unauthorized)
+//  2. Invalid trash list from an ordinary user
+//     (expected result: 401 Unauthorized)
 //
-// 3. Valid trash list from the data manager
-//    (expected result: 200 OK with request body "Received 3 trash
-//    requests"
+//  3. Valid trash list from the data manager
+//     (expected result: 200 OK with request body "Received 3 trash
+//     requests"
 //
-// 4. Invalid trash list from the data manager
-//    (expected result: 400 Bad Request)
+//  4. Invalid trash list from the data manager
+//     (expected result: 400 Bad Request)
 //
 // Test that in the end, the trash collector received a good
 // trash list with the expected number of requests.
@@ -868,7 +952,6 @@ func (s *HandlerSuite) TestPullHandler(c *check.C) {
 // TODO(twp): test concurrency: launch 100 goroutines to update the
 // pull list simultaneously. Make sure that none of them return 400
 // Bad Request and that replica.Dump() returns a valid list.
-//
 func (s *HandlerSuite) TestTrashHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 	// Replace the router's trashq -- which the worker goroutines
@@ -1065,15 +1148,6 @@ func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
 	}
 }
 
-type notifyingResponseRecorder struct {
-	*httptest.ResponseRecorder
-	closer chan bool
-}
-
-func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
-	return r.closer
-}
-
 func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
 	s.cluster.Collections.BlobSigning = false
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
@@ -1084,23 +1158,15 @@ func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
 	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
 	defer bufs.Put(bufs.Get(BlockSize))
 
-	if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
-		c.Error(err)
-	}
-
-	resp := &notifyingResponseRecorder{
-		ResponseRecorder: httptest.NewRecorder(),
-		closer:           make(chan bool, 1),
-	}
-	if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
-		c.Fatal("notifyingResponseRecorder is broken")
-	}
-	// If anyone asks, the client has disconnected.
-	resp.closer <- true
+	err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
+	c.Assert(err, check.IsNil)
 
+	resp := httptest.NewRecorder()
 	ok := make(chan struct{})
 	go func() {
-		req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
+		ctx, cancel := context.WithCancel(context.Background())
+		req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
+		cancel()
 		s.handler.ServeHTTP(resp, req)
 		ok <- struct{}{}
 	}()
@@ -1111,7 +1177,7 @@
 	case <-ok:
 	}
 
-	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
+	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
 	for i, v := range s.handler.volmgr.AllWritable() {
 		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
 			c.Errorf("volume %d got %d calls, expected 0", i, calls)
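
The trial table in TestConcurrentWritesToMultipleStorageClasses above encodes a selection rule: starting from a rotating counter offset, try each mount in turn and stop once every requested storage class is satisfied. The following minimal sketch of that rule is hypothetical (the mount type and choose function are stand-ins; the real logic lives in keepstore's volume manager) and is offered only to make the expected Put counts in the table easier to follow.

package main

import "fmt"

// mount is a hypothetical stand-in for a keepstore mount: a volume
// UUID plus the storage classes it provides.
type mount struct {
	uuid    string
	classes map[string]bool
}

// choose walks the mounts starting at a rotating offset (the test
// controls the real counterpart via vm.counter) and keeps picking
// mounts until every wanted class is covered.
func choose(mounts []mount, offset int, wanted ...string) []mount {
	need := map[string]bool{}
	for _, c := range wanted {
		need[c] = true
	}
	var picked []mount
	for i := 0; i < len(mounts) && len(need) > 0; i++ {
		m := mounts[(offset+i)%len(mounts)]
		hit := false
		for c := range need {
			if m.classes[c] {
				delete(need, c) // this class is satisfied; stop looking for it
				hit = true
			}
		}
		if hit {
			picked = append(picked, m)
		}
	}
	return picked
}

func main() {
	mounts := []mount{
		{"zzzzz-nyw5e-111111111111111", map[string]bool{"class1": true}},
		{"zzzzz-nyw5e-121212121212121", map[string]bool{"class1": true, "class2": true}},
		{"zzzzz-nyw5e-222222222222222", map[string]bool{"class2": true}},
	}
	for _, m := range choose(mounts, 0, "class1", "class2") {
		fmt.Println(m.uuid) // 111..., then 121...; 222... is never touched
	}
}

With offset 0 and classes "class1,class2" this picks 111... and then 121..., matching the put111=1, put121=1, put222=0 row of the table; with offset 1 the first mount tried is 121..., which satisfies both classes by itself, matching the row where only put121 is 1.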
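The last hunks above replace the old notifyingResponseRecorder, which faked the long-deprecated http.CloseNotifier interface, with a request whose context is cancelled before the handler runs; Request.Context is the supported way to observe client disconnects. Here is a minimal, self-contained sketch of that pattern, assuming a toy handler (only the cancel-before-ServeHTTP technique comes from the diff):

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Toy handler: respond 503 if the client has already gone away,
	// roughly what the keepstore GET handler is expected to do in
	// TestGetHandlerClientDisconnect.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-r.Context().Done():
			w.WriteHeader(http.StatusServiceUnavailable)
		default:
			fmt.Fprint(w, "block data")
		}
	})

	resp := httptest.NewRecorder()
	ctx, cancel := context.WithCancel(context.Background())
	req, _ := http.NewRequestWithContext(ctx, "GET", "/aaaa+4", nil)
	cancel() // simulate "client disconnected" before the handler runs

	h.ServeHTTP(resp, req)
	fmt.Println(resp.Code) // 503
}

In the test itself the effect is made deterministic a different way: the buffer pool is drained first (the deferred bufs.Put(bufs.Get(BlockSize)) holds the pool's only buffer), so the handler is still blocked waiting for a buffer when it observes the cancelled context, which is why the test can also assert that no volume ever saw a GET call.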