X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/ce4285dd9a6310a799b861237918273329390316..8a27fe370239ecb8e50d53f46b45ed61203a35ca:/services/keepstore/handler_test.go diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go index c37a4d112f..d545bde0ab 100644 --- a/services/keepstore/handler_test.go +++ b/services/keepstore/handler_test.go @@ -11,7 +11,7 @@ // The HTTP handlers are responsible for enforcing permission policy, // so these tests must exercise all possible permission permutations. -package main +package keepstore import ( "bytes" @@ -21,26 +21,62 @@ import ( "net/http" "net/http/httptest" "os" - "regexp" + "sort" "strings" - "testing" + "sync/atomic" "time" - "git.curoverse.com/arvados.git/sdk/go/arvados" - "git.curoverse.com/arvados.git/sdk/go/arvadostest" + "git.arvados.org/arvados.git/lib/config" + "git.arvados.org/arvados.git/sdk/go/arvados" + "git.arvados.org/arvados.git/sdk/go/arvadostest" + "git.arvados.org/arvados.git/sdk/go/ctxlog" + "github.com/prometheus/client_golang/prometheus" + check "gopkg.in/check.v1" ) -var testCluster = &arvados.Cluster{ - ClusterID: "zzzzz", +var testServiceURL = func() arvados.URL { + return arvados.URL{Host: "localhost:12345", Scheme: "http"} +}() + +func testCluster(t TB) *arvados.Cluster { + cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load() + if err != nil { + t.Fatal(err) + } + cluster, err := cfg.GetCluster("") + if err != nil { + t.Fatal(err) + } + cluster.SystemRootToken = arvadostest.SystemRootToken + cluster.ManagementToken = arvadostest.ManagementToken + cluster.Collections.BlobSigning = false + return cluster +} + +var _ = check.Suite(&HandlerSuite{}) + +type HandlerSuite struct { + cluster *arvados.Cluster + handler *handler +} + +func (s *HandlerSuite) SetUpTest(c *check.C) { + s.cluster = testCluster(c) + s.cluster.Volumes = map[string]arvados.Volume{ + "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, + "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"}, + } + s.handler = &handler{} } // A RequestTester represents the parameters for an HTTP request to // be issued on behalf of a unit test. type RequestTester struct { - uri string - apiToken string - method string - requestBody []byte + uri string + apiToken string + method string + requestBody []byte + storageClasses string } // Test GetBlockHandler on the following situations: @@ -49,47 +85,43 @@ type RequestTester struct { // - permissions on, authenticated request, unsigned locator // - permissions on, unauthenticated request, signed locator // - permissions on, authenticated request, expired locator +// - permissions on, authenticated request, signed locator, transient error from backend // -func TestGetHandler(t *testing.T) { - defer teardown() - - // Prepare two test Keep volumes. Our block is stored on the second volume. - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() +func (s *HandlerSuite) TestGetHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) - vols := KeepVM.AllWritable() - if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil { - t.Error(err) - } + vols := s.handler.volmgr.AllWritable() + err := vols[0].Put(context.Background(), TestHash, TestBlock) + c.Check(err, check.IsNil) // Create locators for testing. // Turn on permission settings so we can generate signed locators. 
- theConfig.RequireSignatures = true - theConfig.blobSigningKey = []byte(knownKey) - theConfig.BlobSignatureTTL.Set("5m") + s.cluster.Collections.BlobSigning = true + s.cluster.Collections.BlobSigningKey = knownKey + s.cluster.Collections.BlobSigningTTL.Set("5m") var ( unsignedLocator = "/" + TestHash - validTimestamp = time.Now().Add(theConfig.BlobSignatureTTL.Duration()) + validTimestamp = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration()) expiredTimestamp = time.Now().Add(-time.Hour) - signedLocator = "/" + SignLocator(TestHash, knownToken, validTimestamp) - expiredLocator = "/" + SignLocator(TestHash, knownToken, expiredTimestamp) + signedLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp) + expiredLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp) ) // ----------------- // Test unauthenticated request with permissions off. - theConfig.RequireSignatures = false + s.cluster.Collections.BlobSigning = false // Unauthenticated request, unsigned locator // => OK - response := IssueRequest( + response := IssueRequest(s.handler, &RequestTester{ method: "GET", uri: unsignedLocator, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Unauthenticated request, unsigned locator", http.StatusOK, response) - ExpectBody(t, + ExpectBody(c, "Unauthenticated request, unsigned locator", string(TestBlock), response) @@ -97,60 +129,77 @@ func TestGetHandler(t *testing.T) { receivedLen := response.Header().Get("Content-Length") expectedLen := fmt.Sprintf("%d", len(TestBlock)) if receivedLen != expectedLen { - t.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen) + c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen) } // ---------------- // Permissions: on. - theConfig.RequireSignatures = true + s.cluster.Collections.BlobSigning = true // Authenticated request, signed locator // => OK - response = IssueRequest(&RequestTester{ + response = IssueRequest(s.handler, &RequestTester{ method: "GET", uri: signedLocator, apiToken: knownToken, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Authenticated request, signed locator", http.StatusOK, response) - ExpectBody(t, + ExpectBody(c, "Authenticated request, signed locator", string(TestBlock), response) receivedLen = response.Header().Get("Content-Length") expectedLen = fmt.Sprintf("%d", len(TestBlock)) if receivedLen != expectedLen { - t.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen) + c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen) } // Authenticated request, unsigned locator // => PermissionError - response = IssueRequest(&RequestTester{ + response = IssueRequest(s.handler, &RequestTester{ method: "GET", uri: unsignedLocator, apiToken: knownToken, }) - ExpectStatusCode(t, "unsigned locator", PermissionError.HTTPCode, response) + ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response) // Unauthenticated request, signed locator // => PermissionError - response = IssueRequest(&RequestTester{ + response = IssueRequest(s.handler, &RequestTester{ method: "GET", uri: signedLocator, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Unauthenticated request, signed locator", PermissionError.HTTPCode, response) // Authenticated request, expired locator // => ExpiredError - response = IssueRequest(&RequestTester{ + response = IssueRequest(s.handler, &RequestTester{ method: "GET", uri: expiredLocator, apiToken: knownToken, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Authenticated request, expired locator", 
ExpiredError.HTTPCode, response) + + // Authenticated request, signed locator + // => 503 Server busy (transient error) + + // Set up the block owning volume to respond with errors + vols[0].Volume.(*MockVolume).Bad = true + vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError + response = IssueRequest(s.handler, &RequestTester{ + method: "GET", + uri: signedLocator, + apiToken: knownToken, + }) + // A transient error from one volume while the other doesn't find the block + // should make the service return a 503 so that clients can retry. + ExpectStatusCode(c, + "Volume backend busy", + 503, response) } // Test PutBlockHandler on the following situations: @@ -158,44 +207,42 @@ func TestGetHandler(t *testing.T) { // - with server key, authenticated request, unsigned locator // - with server key, unauthenticated request, unsigned locator // -func TestPutHandler(t *testing.T) { - defer teardown() - - // Prepare two test Keep volumes. - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() +func (s *HandlerSuite) TestPutHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) // -------------- // No server key. + s.cluster.Collections.BlobSigningKey = "" + // Unauthenticated request, no server key // => OK (unsigned response) unsignedLocator := "/" + TestHash - response := IssueRequest( + response := IssueRequest(s.handler, &RequestTester{ method: "PUT", uri: unsignedLocator, requestBody: TestBlock, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Unauthenticated request, no server key", http.StatusOK, response) - ExpectBody(t, + ExpectBody(c, "Unauthenticated request, no server key", TestHashPutResp, response) // ------------------ // With a server key. - theConfig.blobSigningKey = []byte(knownKey) - theConfig.BlobSignatureTTL.Set("5m") + s.cluster.Collections.BlobSigningKey = knownKey + s.cluster.Collections.BlobSigningTTL.Set("5m") // When a permission key is available, the locator returned // from an authenticated PUT request will be signed. 
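	// (Illustrative only: a signed locator has the general form
	// <md5hash>+<size>+A<hex-signature>@<hex-expiry>, which is the
	// shape VerifySignature checks further below.)
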
// Authenticated PUT, signed locator // => OK (signed response) - response = IssueRequest( + response = IssueRequest(s.handler, &RequestTester{ method: "PUT", uri: unsignedLocator, @@ -203,80 +250,263 @@ func TestPutHandler(t *testing.T) { apiToken: knownToken, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Authenticated PUT, signed locator, with server key", http.StatusOK, response) responseLocator := strings.TrimSpace(response.Body.String()) - if VerifySignature(responseLocator, knownToken) != nil { - t.Errorf("Authenticated PUT, signed locator, with server key:\n"+ + if VerifySignature(s.cluster, responseLocator, knownToken) != nil { + c.Errorf("Authenticated PUT, signed locator, with server key:\n"+ "response '%s' does not contain a valid signature", responseLocator) } // Unauthenticated PUT, unsigned locator // => OK - response = IssueRequest( + response = IssueRequest(s.handler, &RequestTester{ method: "PUT", uri: unsignedLocator, requestBody: TestBlock, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Unauthenticated PUT, unsigned locator, with server key", http.StatusOK, response) - ExpectBody(t, + ExpectBody(c, "Unauthenticated PUT, unsigned locator, with server key", TestHashPutResp, response) } -func TestPutAndDeleteSkipReadonlyVolumes(t *testing.T) { - defer teardown() - theConfig.systemAuthToken = "fake-data-manager-token" - vols := []*MockVolume{CreateMockVolume(), CreateMockVolume()} - vols[0].Readonly = true - KeepVM = MakeRRVolumeManager([]Volume{vols[0], vols[1]}) - defer KeepVM.Close() - IssueRequest( +func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) { + s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true} + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) + + s.cluster.SystemRootToken = "fake-data-manager-token" + IssueRequest(s.handler, &RequestTester{ method: "PUT", uri: "/" + TestHash, requestBody: TestBlock, }) - defer func(orig bool) { - theConfig.EnableDelete = orig - }(theConfig.EnableDelete) - theConfig.EnableDelete = true - IssueRequest( + + s.cluster.Collections.BlobTrash = true + IssueRequest(s.handler, &RequestTester{ method: "DELETE", uri: "/" + TestHash, requestBody: TestBlock, - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, }) type expect struct { - volnum int + volid string method string callcount int } for _, e := range []expect{ - {0, "Get", 0}, - {0, "Compare", 0}, - {0, "Touch", 0}, - {0, "Put", 0}, - {0, "Delete", 0}, - {1, "Get", 0}, - {1, "Compare", 1}, - {1, "Touch", 1}, - {1, "Put", 1}, - {1, "Delete", 1}, + {"zzzzz-nyw5e-000000000000000", "Get", 0}, + {"zzzzz-nyw5e-000000000000000", "Compare", 0}, + {"zzzzz-nyw5e-000000000000000", "Touch", 0}, + {"zzzzz-nyw5e-000000000000000", "Put", 0}, + {"zzzzz-nyw5e-000000000000000", "Delete", 0}, + {"zzzzz-nyw5e-111111111111111", "Get", 0}, + {"zzzzz-nyw5e-111111111111111", "Compare", 1}, + {"zzzzz-nyw5e-111111111111111", "Touch", 1}, + {"zzzzz-nyw5e-111111111111111", "Put", 1}, + {"zzzzz-nyw5e-111111111111111", "Delete", 1}, + } { + if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount { + c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount) + } + } +} + +func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) { + s.cluster.Volumes = map[string]arvados.Volume{ + "zzzzz-nyw5e-111111111111111": { + Driver: "mock", + Replication: 1, + 
StorageClasses: map[string]bool{"class1": true}}, + "zzzzz-nyw5e-222222222222222": { + Driver: "mock", + Replication: 1, + StorageClasses: map[string]bool{"class2": true, "class3": true}}, + } + + for _, trial := range []struct { + priority1 int // priority of class1, thus vol1 + priority2 int // priority of class2 + priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3)) + get1 int // expected number of "get" ops on vol1 + get2 int // expected number of "get" ops on vol2 + }{ + {100, 50, 50, 1, 0}, // class1 has higher priority => try vol1 first, no need to try vol2 + {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed + {66, 99, 33, 1, 1}, // class2 has higher priority => try vol2 first, then try vol1 + {66, 33, 99, 1, 1}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1 + } { + c.Logf("%+v", trial) + s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{ + "class1": {Priority: trial.priority1}, + "class2": {Priority: trial.priority2}, + "class3": {Priority: trial.priority3}, + } + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) + IssueRequest(s.handler, + &RequestTester{ + method: "PUT", + uri: "/" + TestHash, + requestBody: TestBlock, + storageClasses: "class1", + }) + IssueRequest(s.handler, + &RequestTester{ + method: "GET", + uri: "/" + TestHash, + }) + c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1) + c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2) + } +} + +func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) { + s.cluster.Volumes = map[string]arvados.Volume{ + "zzzzz-nyw5e-111111111111111": { + Driver: "mock", + Replication: 1, + ReadOnly: true, + StorageClasses: map[string]bool{"class1": true}}, + } + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) + resp := IssueRequest(s.handler, + &RequestTester{ + method: "PUT", + uri: "/" + TestHash, + requestBody: TestBlock, + storageClasses: "class1", + }) + c.Check(resp.Code, check.Equals, FullError.HTTPCode) + c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0) +} + +func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) { + s.cluster.Volumes = map[string]arvados.Volume{ + "zzzzz-nyw5e-111111111111111": { + Driver: "mock", + Replication: 1, + StorageClasses: map[string]bool{"class1": true}}, + "zzzzz-nyw5e-121212121212121": { + Driver: "mock", + Replication: 1, + StorageClasses: map[string]bool{"class1": true, "class2": true}}, + "zzzzz-nyw5e-222222222222222": { + Driver: "mock", + Replication: 1, + StorageClasses: map[string]bool{"class2": true}}, + } + + for _, trial := range []struct { + setCounter uint32 // value to stuff vm.counter, to control offset + classes string // desired classes + put111 int // expected number of "put" ops on 11111... after 2x put reqs + put121 int // expected number of "put" ops on 12121... + put222 int // expected number of "put" ops on 22222... + cmp111 int // expected number of "compare" ops on 11111... after 2x put reqs + cmp121 int // expected number of "compare" ops on 12121... + cmp222 int // expected number of "compare" ops on 22222... 
+	}{
+		{0, "class1",
+			1, 0, 0,
+			2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 111
+		{0, "class2",
+			0, 1, 0,
+			0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
+		{0, "class1,class2",
+			1, 1, 0,
+			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+		{1, "class1,class2",
+			0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
+			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+		{0, "class1,class2,class404",
+			1, 1, 0,
+			2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
 	} {
-		if calls := vols[e.volnum].CallCount(e.method); calls != e.callcount {
-			t.Errorf("Got %d %s() on vol %d, expect %d", calls, e.method, e.volnum, e.callcount)
+		c.Logf("%+v", trial)
+		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+			"class1": {},
+			"class2": {},
+			"class3": {},
+		}
+		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+		atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
+		for i := 0; i < 2; i++ {
+			IssueRequest(s.handler,
+				&RequestTester{
+					method:         "PUT",
+					uri:            "/" + TestHash,
+					requestBody:    TestBlock,
+					storageClasses: trial.classes,
+				})
 		}
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
+		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
 	}
 }

+// Test TOUCH requests.
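+// Based on the cases below: TOUCH refreshes a stored block's mtime,
+// requires the cluster's SystemRootToken, and returns 404 for a
+// nonexistent block.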
+func (s *HandlerSuite) TestTouchHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) + vols := s.handler.volmgr.AllWritable() + vols[0].Put(context.Background(), TestHash, TestBlock) + vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour)) + afterPut := time.Now() + t, err := vols[0].Mtime(TestHash) + c.Assert(err, check.IsNil) + c.Assert(t.Before(afterPut), check.Equals, true) + + ExpectStatusCode(c, + "touch with no credentials", + http.StatusUnauthorized, + IssueRequest(s.handler, &RequestTester{ + method: "TOUCH", + uri: "/" + TestHash, + })) + + ExpectStatusCode(c, + "touch with non-root credentials", + http.StatusUnauthorized, + IssueRequest(s.handler, &RequestTester{ + method: "TOUCH", + uri: "/" + TestHash, + apiToken: arvadostest.ActiveTokenV2, + })) + + ExpectStatusCode(c, + "touch non-existent block", + http.StatusNotFound, + IssueRequest(s.handler, &RequestTester{ + method: "TOUCH", + uri: "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + apiToken: s.cluster.SystemRootToken, + })) + + beforeTouch := time.Now() + ExpectStatusCode(c, + "touch block", + http.StatusOK, + IssueRequest(s.handler, &RequestTester{ + method: "TOUCH", + uri: "/" + TestHash, + apiToken: s.cluster.SystemRootToken, + })) + t, err = vols[0].Mtime(TestHash) + c.Assert(err, check.IsNil) + c.Assert(t.After(beforeTouch), check.Equals, true) +} + // Test /index requests: // - unauthenticated /index request // - unauthenticated /index/prefix request @@ -286,24 +516,20 @@ func TestPutAndDeleteSkipReadonlyVolumes(t *testing.T) { // - authenticated /index/prefix request | superuser // // The only /index requests that should succeed are those issued by the -// superuser. They should pass regardless of the value of RequireSignatures. +// superuser. They should pass regardless of the value of BlobSigning. // -func TestIndexHandler(t *testing.T) { - defer teardown() +func (s *HandlerSuite) TestIndexHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) - // Set up Keep volumes and populate them. 
// Include multiple blocks on different volumes, and // some metadata files (which should be omitted from index listings) - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() - - vols := KeepVM.AllWritable() + vols := s.handler.volmgr.AllWritable() vols[0].Put(context.Background(), TestHash, TestBlock) vols[1].Put(context.Background(), TestHash2, TestBlock2) vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata")) vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata")) - theConfig.systemAuthToken = "DATA MANAGER TOKEN" + s.cluster.SystemRootToken = "DATA MANAGER TOKEN" unauthenticatedReq := &RequestTester{ method: "GET", @@ -317,7 +543,7 @@ func TestIndexHandler(t *testing.T) { superuserReq := &RequestTester{ method: "GET", uri: "/index", - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } unauthPrefixReq := &RequestTester{ method: "GET", @@ -331,121 +557,113 @@ func TestIndexHandler(t *testing.T) { superuserPrefixReq := &RequestTester{ method: "GET", uri: "/index/" + TestHash[0:3], - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } superuserNoSuchPrefixReq := &RequestTester{ method: "GET", uri: "/index/abcd", - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } superuserInvalidPrefixReq := &RequestTester{ method: "GET", uri: "/index/xyz", - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } // ------------------------------------------------------------- // Only the superuser should be allowed to issue /index requests. // --------------------------- - // RequireSignatures enabled + // BlobSigning enabled // This setting should not affect tests passing. - theConfig.RequireSignatures = true + s.cluster.Collections.BlobSigning = true // unauthenticated /index request // => UnauthorizedError - response := IssueRequest(unauthenticatedReq) - ExpectStatusCode(t, - "RequireSignatures on, unauthenticated request", + response := IssueRequest(s.handler, unauthenticatedReq) + ExpectStatusCode(c, + "permissions on, unauthenticated request", UnauthorizedError.HTTPCode, response) // unauthenticated /index/prefix request // => UnauthorizedError - response = IssueRequest(unauthPrefixReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, unauthPrefixReq) + ExpectStatusCode(c, "permissions on, unauthenticated /index/prefix request", UnauthorizedError.HTTPCode, response) // authenticated /index request, non-superuser // => UnauthorizedError - response = IssueRequest(authenticatedReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, authenticatedReq) + ExpectStatusCode(c, "permissions on, authenticated request, non-superuser", UnauthorizedError.HTTPCode, response) // authenticated /index/prefix request, non-superuser // => UnauthorizedError - response = IssueRequest(authPrefixReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, authPrefixReq) + ExpectStatusCode(c, "permissions on, authenticated /index/prefix request, non-superuser", UnauthorizedError.HTTPCode, response) // superuser /index request // => OK - response = IssueRequest(superuserReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserReq) + ExpectStatusCode(c, "permissions on, superuser request", http.StatusOK, response) // ---------------------------- - // RequireSignatures disabled + // BlobSigning disabled // Valid Request should still pass. 
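	// (Authorization for /index hinges on the system root token, not
	// on blob signatures, so this toggle should make no difference.)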
- theConfig.RequireSignatures = false + s.cluster.Collections.BlobSigning = false // superuser /index request // => OK - response = IssueRequest(superuserReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserReq) + ExpectStatusCode(c, "permissions on, superuser request", http.StatusOK, response) expected := `^` + TestHash + `\+\d+ \d+\n` + TestHash2 + `\+\d+ \d+\n\n$` - match, _ := regexp.MatchString(expected, response.Body.String()) - if !match { - t.Errorf( - "permissions on, superuser request: expected %s, got:\n%s", - expected, response.Body.String()) - } + c.Check(response.Body.String(), check.Matches, expected, check.Commentf( + "permissions on, superuser request")) // superuser /index/prefix request // => OK - response = IssueRequest(superuserPrefixReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserPrefixReq) + ExpectStatusCode(c, "permissions on, superuser request", http.StatusOK, response) expected = `^` + TestHash + `\+\d+ \d+\n\n$` - match, _ = regexp.MatchString(expected, response.Body.String()) - if !match { - t.Errorf( - "permissions on, superuser /index/prefix request: expected %s, got:\n%s", - expected, response.Body.String()) - } + c.Check(response.Body.String(), check.Matches, expected, check.Commentf( + "permissions on, superuser /index/prefix request")) // superuser /index/{no-such-prefix} request // => OK - response = IssueRequest(superuserNoSuchPrefixReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserNoSuchPrefixReq) + ExpectStatusCode(c, "permissions on, superuser request", http.StatusOK, response) if "\n" != response.Body.String() { - t.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String()) + c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String()) } // superuser /index/{invalid-prefix} request // => StatusBadRequest - response = IssueRequest(superuserInvalidPrefixReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserInvalidPrefixReq) + ExpectStatusCode(c, "permissions on, superuser request", http.StatusBadRequest, response) @@ -477,27 +695,21 @@ func TestIndexHandler(t *testing.T) { // (test for 200 OK, response with copies_deleted=0, copies_failed=1, // confirm block not deleted) // -func TestDeleteHandler(t *testing.T) { - defer teardown() - - // Set up Keep volumes and populate them. - // Include multiple blocks on different volumes, and - // some metadata files (which should be omitted from index listings) - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() +func (s *HandlerSuite) TestDeleteHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) - vols := KeepVM.AllWritable() + vols := s.handler.volmgr.AllWritable() vols[0].Put(context.Background(), TestHash, TestBlock) - // Explicitly set the BlobSignatureTTL to 0 for these + // Explicitly set the BlobSigningTTL to 0 for these // tests, to ensure the MockVolume deletes the blocks // even though they have just been created. 
- theConfig.BlobSignatureTTL = arvados.Duration(0) + s.cluster.Collections.BlobSigningTTL = arvados.Duration(0) var userToken = "NOT DATA MANAGER TOKEN" - theConfig.systemAuthToken = "DATA MANAGER TOKEN" + s.cluster.SystemRootToken = "DATA MANAGER TOKEN" - theConfig.EnableDelete = true + s.cluster.Collections.BlobTrash = true unauthReq := &RequestTester{ method: "DELETE", @@ -513,26 +725,26 @@ func TestDeleteHandler(t *testing.T) { superuserExistingBlockReq := &RequestTester{ method: "DELETE", uri: "/" + TestHash, - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } superuserNonexistentBlockReq := &RequestTester{ method: "DELETE", uri: "/" + TestHash2, - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } // Unauthenticated request returns PermissionError. var response *httptest.ResponseRecorder - response = IssueRequest(unauthReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, unauthReq) + ExpectStatusCode(c, "unauthenticated request", PermissionError.HTTPCode, response) // Authenticated non-admin request returns PermissionError. - response = IssueRequest(userReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, userReq) + ExpectStatusCode(c, "authenticated non-admin request", PermissionError.HTTPCode, response) @@ -544,24 +756,24 @@ func TestDeleteHandler(t *testing.T) { } var responseDc, expectedDc deletecounter - response = IssueRequest(superuserNonexistentBlockReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserNonexistentBlockReq) + ExpectStatusCode(c, "data manager request, nonexistent block", http.StatusNotFound, response) - // Authenticated admin request for existing block while EnableDelete is false. - theConfig.EnableDelete = false - response = IssueRequest(superuserExistingBlockReq) - ExpectStatusCode(t, + // Authenticated admin request for existing block while BlobTrash is false. + s.cluster.Collections.BlobTrash = false + response = IssueRequest(s.handler, superuserExistingBlockReq) + ExpectStatusCode(c, "authenticated request, existing block, method disabled", MethodDisabledError.HTTPCode, response) - theConfig.EnableDelete = true + s.cluster.Collections.BlobTrash = true // Authenticated admin request for existing block. - response = IssueRequest(superuserExistingBlockReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserExistingBlockReq) + ExpectStatusCode(c, "data manager request, existing block", http.StatusOK, response) @@ -569,7 +781,7 @@ func TestDeleteHandler(t *testing.T) { expectedDc = deletecounter{1, 0} json.NewDecoder(response.Body).Decode(&responseDc) if responseDc != expectedDc { - t.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v", + c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v", expectedDc, responseDc) } // Confirm the block has been deleted @@ -577,16 +789,16 @@ func TestDeleteHandler(t *testing.T) { _, err := vols[0].Get(context.Background(), TestHash, buf) var blockDeleted = os.IsNotExist(err) if !blockDeleted { - t.Error("superuserExistingBlockReq: block not deleted") + c.Error("superuserExistingBlockReq: block not deleted") } - // A DELETE request on a block newer than BlobSignatureTTL + // A DELETE request on a block newer than BlobSigningTTL // should return success but leave the block on the volume. 
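	// (Refusing to trash blocks younger than the TTL is a safety
	// measure: clients may still hold valid signed locators that
	// reference them.)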
vols[0].Put(context.Background(), TestHash, TestBlock) - theConfig.BlobSignatureTTL = arvados.Duration(time.Hour) + s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour) - response = IssueRequest(superuserExistingBlockReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, superuserExistingBlockReq) + ExpectStatusCode(c, "data manager request, existing block", http.StatusOK, response) @@ -594,13 +806,13 @@ func TestDeleteHandler(t *testing.T) { expectedDc = deletecounter{1, 0} json.NewDecoder(response.Body).Decode(&responseDc) if responseDc != expectedDc { - t.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v", + c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v", expectedDc, responseDc) } // Confirm the block has NOT been deleted. _, err = vols[0].Get(context.Background(), TestHash, buf) if err != nil { - t.Errorf("testing delete on new block: %s\n", err) + c.Errorf("testing delete on new block: %s\n", err) } } @@ -631,29 +843,33 @@ func TestDeleteHandler(t *testing.T) { // pull list simultaneously. Make sure that none of them return 400 // Bad Request and that pullq.GetList() returns a valid list. // -func TestPullHandler(t *testing.T) { - defer teardown() +func (s *HandlerSuite) TestPullHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) - var userToken = "USER TOKEN" - theConfig.systemAuthToken = "DATA MANAGER TOKEN" + // Replace the router's pullq -- which the worker goroutines + // started by setup() are now receiving from -- with a new + // one, so we can see what the handler sends to it. + pullq := NewWorkQueue() + s.handler.Handler.(*router).pullq = pullq - pullq = NewWorkQueue() + var userToken = "USER TOKEN" + s.cluster.SystemRootToken = "DATA MANAGER TOKEN" goodJSON := []byte(`[ { - "locator":"locator_with_two_servers", + "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345", "servers":[ - "server1", - "server2" + "http://server1", + "http://server2" ] }, { - "locator":"locator_with_no_servers", + "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345", "servers":[] }, { - "locator":"", - "servers":["empty_locator"] + "locator":"cccccccccccccccccccccccccccccccc+12345", + "servers":["http://server1"] } ]`) @@ -668,46 +884,51 @@ func TestPullHandler(t *testing.T) { var testcases = []pullTest{ { "Valid pull list from an ordinary user", - RequestTester{"/pull", userToken, "PUT", goodJSON}, + RequestTester{"/pull", userToken, "PUT", goodJSON, ""}, http.StatusUnauthorized, "Unauthorized\n", }, { "Invalid pull request from an ordinary user", - RequestTester{"/pull", userToken, "PUT", badJSON}, + RequestTester{"/pull", userToken, "PUT", badJSON, ""}, http.StatusUnauthorized, "Unauthorized\n", }, { "Valid pull request from the data manager", - RequestTester{"/pull", theConfig.systemAuthToken, "PUT", goodJSON}, + RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""}, http.StatusOK, "Received 3 pull requests\n", }, { "Invalid pull request from the data manager", - RequestTester{"/pull", theConfig.systemAuthToken, "PUT", badJSON}, + RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""}, http.StatusBadRequest, "", }, } for _, tst := range testcases { - response := IssueRequest(&tst.req) - ExpectStatusCode(t, tst.name, tst.responseCode, response) - ExpectBody(t, tst.name, tst.responseBody, response) + response := IssueRequest(s.handler, &tst.req) + ExpectStatusCode(c, tst.name, tst.responseCode, response) + ExpectBody(c, 
tst.name, tst.responseBody, response) } // The Keep pull manager should have received one good list with 3 // requests on it. for i := 0; i < 3; i++ { - item := <-pullq.NextItem + var item interface{} + select { + case item = <-pullq.NextItem: + case <-time.After(time.Second): + c.Error("timed out") + } if _, ok := item.(PullRequest); !ok { - t.Errorf("item %v could not be parsed as a PullRequest", item) + c.Errorf("item %v could not be parsed as a PullRequest", item) } } - expectChannelEmpty(t, pullq.NextItem) + expectChannelEmpty(c, pullq.NextItem) } // TestTrashHandler @@ -737,13 +958,16 @@ func TestPullHandler(t *testing.T) { // pull list simultaneously. Make sure that none of them return 400 // Bad Request and that replica.Dump() returns a valid list. // -func TestTrashHandler(t *testing.T) { - defer teardown() +func (s *HandlerSuite) TestTrashHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) + // Replace the router's trashq -- which the worker goroutines + // started by setup() are now receiving from -- with a new + // one, so we can see what the handler sends to it. + trashq := NewWorkQueue() + s.handler.Handler.(*router).trashq = trashq var userToken = "USER TOKEN" - theConfig.systemAuthToken = "DATA MANAGER TOKEN" - - trashq = NewWorkQueue() + s.cluster.SystemRootToken = "DATA MANAGER TOKEN" goodJSON := []byte(`[ { @@ -772,34 +996,34 @@ func TestTrashHandler(t *testing.T) { var testcases = []trashTest{ { "Valid trash list from an ordinary user", - RequestTester{"/trash", userToken, "PUT", goodJSON}, + RequestTester{"/trash", userToken, "PUT", goodJSON, ""}, http.StatusUnauthorized, "Unauthorized\n", }, { "Invalid trash list from an ordinary user", - RequestTester{"/trash", userToken, "PUT", badJSON}, + RequestTester{"/trash", userToken, "PUT", badJSON, ""}, http.StatusUnauthorized, "Unauthorized\n", }, { "Valid trash list from the data manager", - RequestTester{"/trash", theConfig.systemAuthToken, "PUT", goodJSON}, + RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""}, http.StatusOK, "Received 3 trash requests\n", }, { "Invalid trash list from the data manager", - RequestTester{"/trash", theConfig.systemAuthToken, "PUT", badJSON}, + RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""}, http.StatusBadRequest, "", }, } for _, tst := range testcases { - response := IssueRequest(&tst.req) - ExpectStatusCode(t, tst.name, tst.responseCode, response) - ExpectBody(t, tst.name, tst.responseBody, response) + response := IssueRequest(s.handler, &tst.req) + ExpectStatusCode(c, tst.name, tst.responseCode, response) + ExpectBody(c, tst.name, tst.responseBody, response) } // The trash collector should have received one good list with 3 @@ -807,11 +1031,11 @@ func TestTrashHandler(t *testing.T) { for i := 0; i < 3; i++ { item := <-trashq.NextItem if _, ok := item.(TrashRequest); !ok { - t.Errorf("item %v could not be parsed as a TrashRequest", item) + c.Errorf("item %v could not be parsed as a TrashRequest", item) } } - expectChannelEmpty(t, trashq.NextItem) + expectChannelEmpty(c, trashq.NextItem) } // ==================== @@ -820,75 +1044,71 @@ func TestTrashHandler(t *testing.T) { // IssueTestRequest executes an HTTP request described by rt, to a // REST router. It returns the HTTP response to the request. 
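// A typical call, mirroring the tests above:
//
//	response := IssueRequest(s.handler, &RequestTester{method: "GET", uri: "/" + TestHash})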
-func IssueRequest(rt *RequestTester) *httptest.ResponseRecorder { +func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder { response := httptest.NewRecorder() body := bytes.NewReader(rt.requestBody) req, _ := http.NewRequest(rt.method, rt.uri, body) if rt.apiToken != "" { req.Header.Set("Authorization", "OAuth2 "+rt.apiToken) } - loggingRouter := MakeRESTRouter(testCluster) - loggingRouter.ServeHTTP(response, req) + if rt.storageClasses != "" { + req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses) + } + handler.ServeHTTP(response, req) return response } -func IssueHealthCheckRequest(rt *RequestTester) *httptest.ResponseRecorder { +func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder { response := httptest.NewRecorder() body := bytes.NewReader(rt.requestBody) req, _ := http.NewRequest(rt.method, rt.uri, body) if rt.apiToken != "" { req.Header.Set("Authorization", "Bearer "+rt.apiToken) } - loggingRouter := MakeRESTRouter(testCluster) - loggingRouter.ServeHTTP(response, req) + handler.ServeHTTP(response, req) return response } // ExpectStatusCode checks whether a response has the specified status code, // and reports a test failure if not. func ExpectStatusCode( - t *testing.T, + c *check.C, testname string, expectedStatus int, response *httptest.ResponseRecorder) { - if response.Code != expectedStatus { - t.Errorf("%s: expected status %d, got %+v", - testname, expectedStatus, response) - } + c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname)) } func ExpectBody( - t *testing.T, + c *check.C, testname string, expectedBody string, response *httptest.ResponseRecorder) { if expectedBody != "" && response.Body.String() != expectedBody { - t.Errorf("%s: expected response body '%s', got %+v", + c.Errorf("%s: expected response body '%s', got %+v", testname, expectedBody, response) } } // See #7121 -func TestPutNeedsOnlyOneBuffer(t *testing.T) { - defer teardown() - KeepVM = MakeTestVolumeManager(1) - defer KeepVM.Close() +func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) defer func(orig *bufferPool) { bufs = orig }(bufs) - bufs = newBufferPool(1, BlockSize) + bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize) ok := make(chan struct{}) go func() { for i := 0; i < 2; i++ { - response := IssueRequest( + response := IssueRequest(s.handler, &RequestTester{ method: "PUT", uri: "/" + TestHash, requestBody: TestBlock, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "TestPutNeedsOnlyOneBuffer", http.StatusOK, response) } ok <- struct{}{} @@ -897,34 +1117,30 @@ func TestPutNeedsOnlyOneBuffer(t *testing.T) { select { case <-ok: case <-time.After(time.Second): - t.Fatal("PUT deadlocks with MaxBuffers==1") + c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1") } } // Invoke the PutBlockHandler a bunch of times to test for bufferpool resource // leak. -func TestPutHandlerNoBufferleak(t *testing.T) { - defer teardown() - - // Prepare two test Keep volumes. 
- KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() +func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) ok := make(chan bool) go func() { - for i := 0; i < theConfig.MaxBuffers+1; i++ { + for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ { // Unauthenticated request, no server key // => OK (unsigned response) unsignedLocator := "/" + TestHash - response := IssueRequest( + response := IssueRequest(s.handler, &RequestTester{ method: "PUT", uri: unsignedLocator, requestBody: TestBlock, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "TestPutHandlerBufferleak", http.StatusOK, response) - ExpectBody(t, + ExpectBody(c, "TestPutHandlerBufferleak", TestHashPutResp, response) } @@ -933,98 +1149,72 @@ func TestPutHandlerNoBufferleak(t *testing.T) { select { case <-time.After(20 * time.Second): // If the buffer pool leaks, the test goroutine hangs. - t.Fatal("test did not finish, assuming pool leaked") + c.Fatal("test did not finish, assuming pool leaked") case <-ok: } } -type notifyingResponseRecorder struct { - *httptest.ResponseRecorder - closer chan bool -} - -func (r *notifyingResponseRecorder) CloseNotify() <-chan bool { - return r.closer -} - -func TestGetHandlerClientDisconnect(t *testing.T) { - defer func(was bool) { - theConfig.RequireSignatures = was - }(theConfig.RequireSignatures) - theConfig.RequireSignatures = false +func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) { + s.cluster.Collections.BlobSigning = false + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) defer func(orig *bufferPool) { bufs = orig }(bufs) - bufs = newBufferPool(1, BlockSize) + bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize) defer bufs.Put(bufs.Get(BlockSize)) - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() - - if err := KeepVM.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil { - t.Error(err) - } - - resp := ¬ifyingResponseRecorder{ - ResponseRecorder: httptest.NewRecorder(), - closer: make(chan bool, 1), - } - if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok { - t.Fatal("notifyingResponseRecorder is broken") - } - // If anyone asks, the client has disconnected. 
- resp.closer <- true + err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock) + c.Assert(err, check.IsNil) + resp := httptest.NewRecorder() ok := make(chan struct{}) go func() { - req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil) - MakeRESTRouter(testCluster).ServeHTTP(resp, req) + ctx, cancel := context.WithCancel(context.Background()) + req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil) + cancel() + s.handler.ServeHTTP(resp, req) ok <- struct{}{} }() select { case <-time.After(20 * time.Second): - t.Fatal("request took >20s, close notifier must be broken") + c.Fatal("request took >20s, close notifier must be broken") case <-ok: } - ExpectStatusCode(t, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder) - for i, v := range KeepVM.AllWritable() { - if calls := v.(*MockVolume).called["GET"]; calls != 0 { - t.Errorf("volume %d got %d calls, expected 0", i, calls) + ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp) + for i, v := range s.handler.volmgr.AllWritable() { + if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 { + c.Errorf("volume %d got %d calls, expected 0", i, calls) } } } // Invoke the GetBlockHandler a bunch of times to test for bufferpool resource // leak. -func TestGetHandlerNoBufferLeak(t *testing.T) { - defer teardown() +func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) - // Prepare two test Keep volumes. Our block is stored on the second volume. - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() - - vols := KeepVM.AllWritable() + vols := s.handler.volmgr.AllWritable() if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil { - t.Error(err) + c.Error(err) } ok := make(chan bool) go func() { - for i := 0; i < theConfig.MaxBuffers+1; i++ { + for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ { // Unauthenticated request, unsigned locator // => OK unsignedLocator := "/" + TestHash - response := IssueRequest( + response := IssueRequest(s.handler, &RequestTester{ method: "GET", uri: unsignedLocator, }) - ExpectStatusCode(t, + ExpectStatusCode(c, "Unauthenticated request, unsigned locator", http.StatusOK, response) - ExpectBody(t, + ExpectBody(c, "Unauthenticated request, unsigned locator", string(TestBlock), response) @@ -1034,45 +1224,97 @@ func TestGetHandlerNoBufferLeak(t *testing.T) { select { case <-time.After(20 * time.Second): // If the buffer pool leaks, the test goroutine hangs. 
- t.Fatal("test did not finish, assuming pool leaked") + c.Fatal("test did not finish, assuming pool leaked") case <-ok: } } -func TestPutReplicationHeader(t *testing.T) { - defer teardown() +func (s *HandlerSuite) TestPutStorageClasses(c *check.C) { + s.cluster.Volumes = map[string]arvados.Volume{ + "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit + "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}}, + "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true}, + } + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) + rt := RequestTester{ + method: "PUT", + uri: "/" + TestHash, + requestBody: TestBlock, + } + + for _, trial := range []struct { + ask string + expect string + }{ + {"", ""}, + {"default", "default=1"}, + {" , default , default , ", "default=1"}, + {"special", "extra=1, special=1"}, + {"special, readonly", "extra=1, special=1"}, + {"special, nonexistent", "extra=1, special=1"}, + {"extra, special", "extra=1, special=1"}, + {"default, special", "default=1, extra=1, special=1"}, + } { + c.Logf("success case %#v", trial) + rt.storageClasses = trial.ask + resp := IssueRequest(s.handler, &rt) + if trial.expect == "" { + // any non-empty value is correct + c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "") + } else { + c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect) + } + } + + for _, trial := range []struct { + ask string + }{ + {"doesnotexist"}, + {"doesnotexist, readonly"}, + {"readonly"}, + } { + c.Logf("failure case %#v", trial) + rt.storageClasses = trial.ask + resp := IssueRequest(s.handler, &rt) + c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable) + } +} + +func sortCommaSeparated(s string) string { + slice := strings.Split(s, ", ") + sort.Strings(slice) + return strings.Join(slice, ", ") +} - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() +func (s *HandlerSuite) TestPutResponseHeader(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) - resp := IssueRequest(&RequestTester{ + resp := IssueRequest(s.handler, &RequestTester{ method: "PUT", uri: "/" + TestHash, requestBody: TestBlock, }) - if r := resp.Header().Get("X-Keep-Replicas-Stored"); r != "1" { - t.Errorf("Got X-Keep-Replicas-Stored: %q, expected %q", r, "1") - } + c.Logf("%#v", resp) + c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1") + c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1") } -func TestUntrashHandler(t *testing.T) { - defer teardown() +func (s *HandlerSuite) TestUntrashHandler(c *check.C) { + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) // Set up Keep volumes - KeepVM = MakeTestVolumeManager(2) - defer KeepVM.Close() - vols := KeepVM.AllWritable() + vols := s.handler.volmgr.AllWritable() vols[0].Put(context.Background(), TestHash, TestBlock) - theConfig.systemAuthToken = "DATA MANAGER TOKEN" + s.cluster.SystemRootToken = "DATA MANAGER TOKEN" // unauthenticatedReq => UnauthorizedError unauthenticatedReq := &RequestTester{ method: "PUT", uri: "/untrash/" + TestHash, } - response := IssueRequest(unauthenticatedReq) - 
ExpectStatusCode(t, + response := IssueRequest(s.handler, unauthenticatedReq) + ExpectStatusCode(c, "Unauthenticated request", UnauthorizedError.HTTPCode, response) @@ -1084,8 +1326,8 @@ func TestUntrashHandler(t *testing.T) { apiToken: knownToken, } - response = IssueRequest(notDataManagerReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, notDataManagerReq) + ExpectStatusCode(c, "Non-datamanager token", UnauthorizedError.HTTPCode, response) @@ -1094,10 +1336,10 @@ func TestUntrashHandler(t *testing.T) { datamanagerWithBadHashReq := &RequestTester{ method: "PUT", uri: "/untrash/thisisnotalocator", - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } - response = IssueRequest(datamanagerWithBadHashReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, datamanagerWithBadHashReq) + ExpectStatusCode(c, "Bad locator in untrash request", http.StatusBadRequest, response) @@ -1106,10 +1348,10 @@ func TestUntrashHandler(t *testing.T) { datamanagerWrongMethodReq := &RequestTester{ method: "GET", uri: "/untrash/" + TestHash, - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } - response = IssueRequest(datamanagerWrongMethodReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, datamanagerWrongMethodReq) + ExpectStatusCode(c, "Only PUT method is supported for untrash", http.StatusMethodNotAllowed, response) @@ -1118,60 +1360,52 @@ func TestUntrashHandler(t *testing.T) { datamanagerReq := &RequestTester{ method: "PUT", uri: "/untrash/" + TestHash, - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } - response = IssueRequest(datamanagerReq) - ExpectStatusCode(t, + response = IssueRequest(s.handler, datamanagerReq) + ExpectStatusCode(c, "", http.StatusOK, response) - expected := "Successfully untrashed on: [MockVolume],[MockVolume]" - if response.Body.String() != expected { - t.Errorf( - "Untrash response mismatched: expected %s, got:\n%s", - expected, response.Body.String()) - } + c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n") } -func TestUntrashHandlerWithNoWritableVolumes(t *testing.T) { - defer teardown() - - // Set up readonly Keep volumes - vols := []*MockVolume{CreateMockVolume(), CreateMockVolume()} - vols[0].Readonly = true - vols[1].Readonly = true - KeepVM = MakeRRVolumeManager([]Volume{vols[0], vols[1]}) - defer KeepVM.Close() - - theConfig.systemAuthToken = "DATA MANAGER TOKEN" +func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) { + // Change all volumes to read-only + for uuid, v := range s.cluster.Volumes { + v.ReadOnly = true + s.cluster.Volumes[uuid] = v + } + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) // datamanagerReq => StatusOK datamanagerReq := &RequestTester{ method: "PUT", uri: "/untrash/" + TestHash, - apiToken: theConfig.systemAuthToken, + apiToken: s.cluster.SystemRootToken, } - response := IssueRequest(datamanagerReq) - ExpectStatusCode(t, + response := IssueRequest(s.handler, datamanagerReq) + ExpectStatusCode(c, "No writable volumes", http.StatusNotFound, response) } -func TestHealthCheckPing(t *testing.T) { - theConfig.ManagementToken = arvadostest.ManagementToken +func (s *HandlerSuite) TestHealthCheckPing(c *check.C) { + s.cluster.ManagementToken = arvadostest.ManagementToken + c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil) pingReq := 
&RequestTester{ method: "GET", uri: "/_health/ping", apiToken: arvadostest.ManagementToken, } - response := IssueHealthCheckRequest(pingReq) - ExpectStatusCode(t, + response := IssueHealthCheckRequest(s.handler, pingReq) + ExpectStatusCode(c, "", http.StatusOK, response) want := `{"health":"OK"}` if !strings.Contains(response.Body.String(), want) { - t.Errorf("expected response to include %s: got %s", want, response.Body.String()) + c.Errorf("expected response to include %s: got %s", want, response.Body.String()) } }
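These suite-based tests depend on the standard gopkg.in/check.v1 bootstrap
to run under "go test". That hook is not part of this diff; a minimal
sketch of the usual idiom (function name assumed -- the real hook lives
elsewhere in the keepstore package) looks like:

	package keepstore

	import (
		"testing"

		check "gopkg.in/check.v1"
	)

	// Hand control of "go test" to gocheck, which runs every
	// registered suite, including HandlerSuite.
	func Test(t *testing.T) {
		check.TestingT(t)
	}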