// Tests for Keep HTTP handlers:
//
-// GetBlockHandler
-// PutBlockHandler
-// IndexHandler
+// - GetBlockHandler
+// - PutBlockHandler
+// - IndexHandler
//
// The HTTP handlers are responsible for enforcing permission policy,
// so these tests must exercise all possible permission permutations.
-package main
+package keepstore
import (
"bytes"
"net/http"
"net/http/httptest"
"os"
- "regexp"
"sort"
"strings"
+ "sync/atomic"
"time"
"git.arvados.org/arvados.git/lib/config"
// - permissions on, unauthenticated request, signed locator
// - permissions on, authenticated request, expired locator
// - permissions on, authenticated request, signed locator, transient error from backend
-//
func (s *HandlerSuite) TestGetHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// - no server key
// - with server key, authenticated request, unsigned locator
// - with server key, unauthenticated request, unsigned locator
-//
func (s *HandlerSuite) TestPutHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
}
}
+// TestReadsOrderedByStorageClassPriority verifies that GET tries volumes
+// in descending order of storage class priority -- a volume's effective
+// priority being the max over all of its classes -- with ties broken by
+// lexical volume UUID order. The block is stored only on vol1 (class1),
+// so trials where vol2 ranks higher must try vol2 first and then fall
+// back to vol1, yielding one Get call on each volume.
+func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-111111111111111": {
+			Driver: "mock",
+			Replication: 1,
+			StorageClasses: map[string]bool{"class1": true}},
+		"zzzzz-nyw5e-222222222222222": {
+			Driver: "mock",
+			Replication: 1,
+			StorageClasses: map[string]bool{"class2": true, "class3": true}},
+	}
+
+	for _, trial := range []struct {
+		priority1 int // priority of class1, thus vol1
+		priority2 int // priority of class2
+		priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
+		get1 int // expected number of "get" ops on vol1
+		get2 int // expected number of "get" ops on vol2
+	}{
+		{100, 50, 50, 1, 0}, // class1 has higher priority => try vol1 first, no need to try vol2
+		{100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
+		{66, 99, 33, 1, 1}, // class2 has higher priority => try vol2 first, then try vol1
+		{66, 33, 99, 1, 1}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
+	} {
+		c.Logf("%+v", trial)
+		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+			"class1": {Priority: trial.priority1},
+			"class2": {Priority: trial.priority2},
+			"class3": {Priority: trial.priority3},
+		}
+		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+		// Store the block via class1 so it lands on vol1 only.
+		IssueRequest(s.handler,
+			&RequestTester{
+				method: "PUT",
+				uri: "/" + TestHash,
+				requestBody: TestBlock,
+				storageClasses: "class1",
+			})
+		IssueRequest(s.handler,
+			&RequestTester{
+				method: "GET",
+				uri: "/" + TestHash,
+			})
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
+	}
+}
+
+// TestPutWithNoWritableVolumes verifies that a PUT requesting a storage
+// class served only by a read-only volume is rejected with FullError's
+// HTTP status and that no backend Put is attempted on the volume.
+func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-111111111111111": {
+			Driver: "mock",
+			Replication: 1,
+			ReadOnly: true,
+			StorageClasses: map[string]bool{"class1": true}},
+	}
+	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+	resp := IssueRequest(s.handler,
+		&RequestTester{
+			method: "PUT",
+			uri: "/" + TestHash,
+			requestBody: TestBlock,
+			storageClasses: "class1",
+		})
+	c.Check(resp.Code, check.Equals, FullError.HTTPCode)
+	// The read-only volume must never have received a write attempt.
+	c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
+}
+
+// TestConcurrentWritesToMultipleStorageClasses verifies which volumes
+// receive Put and Compare calls when a PUT requests one or more storage
+// classes, across volumes whose class sets overlap. Stuffing
+// volmgr.counter controls the starting offset, i.e. which eligible
+// volume is attempted first. Each trial issues the same PUT twice: the
+// first stores the block, the second should short-circuit once Compare
+// finds the block on volumes satisfying the requested classes.
+func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-111111111111111": {
+			Driver: "mock",
+			Replication: 1,
+			StorageClasses: map[string]bool{"class1": true}},
+		"zzzzz-nyw5e-121212121212121": {
+			Driver: "mock",
+			Replication: 1,
+			StorageClasses: map[string]bool{"class1": true, "class2": true}},
+		"zzzzz-nyw5e-222222222222222": {
+			Driver: "mock",
+			Replication: 1,
+			StorageClasses: map[string]bool{"class2": true}},
+	}
+
+	for _, trial := range []struct {
+		setCounter uint32 // value to stuff vm.counter, to control offset
+		classes string // desired classes
+		put111 int // expected number of "put" ops on 11111... after 2x put reqs
+		put121 int // expected number of "put" ops on 12121...
+		put222 int // expected number of "put" ops on 22222...
+		cmp111 int // expected number of "compare" ops on 11111... after 2x put reqs
+		cmp121 int // expected number of "compare" ops on 12121...
+		cmp222 int // expected number of "compare" ops on 22222...
+	}{
+		{0, "class1",
+			1, 0, 0,
+			2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 111
+		{0, "class2",
+			0, 1, 0,
+			0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
+		{0, "class1,class2",
+			1, 1, 0,
+			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+		{1, "class1,class2",
+			0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
+			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+		{0, "class1,class2,class404",
+			1, 1, 0,
+			2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
+	} {
+		c.Logf("%+v", trial)
+		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+			"class1": {},
+			"class2": {},
+			"class3": {},
+		}
+		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+		atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
+		for i := 0; i < 2; i++ {
+			IssueRequest(s.handler,
+				&RequestTester{
+					method: "PUT",
+					uri: "/" + TestHash,
+					requestBody: TestBlock,
+					storageClasses: trial.classes,
+				})
+		}
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
+	}
+}
+
// Test TOUCH requests.
func (s *HandlerSuite) TestTouchHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
-//
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
expected := `^` + TestHash + `\+\d+ \d+\n` +
TestHash2 + `\+\d+ \d+\n\n$`
- match, _ := regexp.MatchString(expected, response.Body.String())
- if !match {
- c.Errorf(
- "permissions on, superuser request: expected %s, got:\n%s",
- expected, response.Body.String())
- }
+ c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
+ "permissions on, superuser request"))
// superuser /index/prefix request
// => OK
response)
expected = `^` + TestHash + `\+\d+ \d+\n\n$`
- match, _ = regexp.MatchString(expected, response.Body.String())
- if !match {
- c.Errorf(
- "permissions on, superuser /index/prefix request: expected %s, got:\n%s",
- expected, response.Body.String())
- }
+ c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
+ "permissions on, superuser /index/prefix request"))
// superuser /index/{no-such-prefix} request
// => OK
//
// Cases tested:
//
-// With no token and with a non-data-manager token:
-// * Delete existing block
-// (test for 403 Forbidden, confirm block not deleted)
-//
-// With data manager token:
+// With no token and with a non-data-manager token:
+// * Delete existing block
+// (test for 403 Forbidden, confirm block not deleted)
//
-// * Delete existing block
-// (test for 200 OK, response counts, confirm block deleted)
+// With data manager token:
//
-// * Delete nonexistent block
-// (test for 200 OK, response counts)
+// * Delete existing block
+// (test for 200 OK, response counts, confirm block deleted)
//
-// TODO(twp):
+// * Delete nonexistent block
+// (test for 200 OK, response counts)
//
-// * Delete block on read-only and read-write volume
-// (test for 200 OK, response with copies_deleted=1,
-// copies_failed=1, confirm block deleted only on r/w volume)
+// TODO(twp):
//
-// * Delete block on read-only volume only
-// (test for 200 OK, response with copies_deleted=0, copies_failed=1,
-// confirm block not deleted)
+// * Delete block on read-only and read-write volume
+// (test for 200 OK, response with copies_deleted=1,
+// copies_failed=1, confirm block deleted only on r/w volume)
//
+// * Delete block on read-only volume only
+// (test for 200 OK, response with copies_deleted=0, copies_failed=1,
+// confirm block not deleted)
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
-// 1. Valid pull list from an ordinary user
-// (expected result: 401 Unauthorized)
+// 1. Valid pull list from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 2. Invalid pull request from an ordinary user
-// (expected result: 401 Unauthorized)
+// 2. Invalid pull request from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 3. Valid pull request from the data manager
-// (expected result: 200 OK with request body "Received 3 pull
-// requests"
+// 3. Valid pull request from the data manager
+// (expected result: 200 OK with request body "Received 3 pull
+// requests"
//
-// 4. Invalid pull request from the data manager
-// (expected result: 400 Bad Request)
+// 4. Invalid pull request from the data manager
+// (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
-//
func (s *HandlerSuite) TestPullHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
-// 1. Valid trash list from an ordinary user
-// (expected result: 401 Unauthorized)
+// 1. Valid trash list from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 2. Invalid trash list from an ordinary user
-// (expected result: 401 Unauthorized)
+// 2. Invalid trash list from an ordinary user
+// (expected result: 401 Unauthorized)
//
-// 3. Valid trash list from the data manager
-// (expected result: 200 OK with request body "Received 3 trash
-// requests"
+// 3. Valid trash list from the data manager
+// (expected result: 200 OK with request body "Received 3 trash
+// requests"
//
-// 4. Invalid trash list from the data manager
-// (expected result: 400 Bad Request)
+// 4. Invalid trash list from the data manager
+// (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good list
// trash list with the expected number of requests.
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
-//
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Replace the router's trashq -- which the worker goroutines
}
}
-type notifyingResponseRecorder struct {
- *httptest.ResponseRecorder
- closer chan bool
-}
-
-func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
- return r.closer
-}
-
func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
s.cluster.Collections.BlobSigning = false
c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
defer bufs.Put(bufs.Get(BlockSize))
- if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
- c.Error(err)
- }
-
- resp := ¬ifyingResponseRecorder{
- ResponseRecorder: httptest.NewRecorder(),
- closer: make(chan bool, 1),
- }
- if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
- c.Fatal("notifyingResponseRecorder is broken")
- }
- // If anyone asks, the client has disconnected.
- resp.closer <- true
+ err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
+ c.Assert(err, check.IsNil)
+ resp := httptest.NewRecorder()
ok := make(chan struct{})
go func() {
- req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
+ ctx, cancel := context.WithCancel(context.Background())
+ req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
+ cancel()
s.handler.ServeHTTP(resp, req)
ok <- struct{}{}
}()
case <-ok:
}
- ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
+ ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
for i, v := range s.handler.volmgr.AllWritable() {
if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
c.Errorf("volume %d got %d calls, expected 0", i, calls)