1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
5 // Tests for Keep HTTP handlers:
11 // The HTTP handlers are responsible for enforcing permission policy,
12 // so these tests must exercise all possible permission permutations.
28 "git.arvados.org/arvados.git/lib/config"
29 "git.arvados.org/arvados.git/sdk/go/arvados"
30 "git.arvados.org/arvados.git/sdk/go/arvadostest"
31 "git.arvados.org/arvados.git/sdk/go/ctxlog"
32 "github.com/prometheus/client_golang/prometheus"
33 check "gopkg.in/check.v1"
// testServiceURL is a fixed localhost address used as the service URL
// when setting up handlers under test.
var testServiceURL = func() arvados.URL {
	return arvados.URL{Host: "localhost:12345", Scheme: "http"}
// testCluster loads a minimal single-cluster ("zzzzz") configuration
// and overrides the tokens and blob-signing setting for test use.
func testCluster(t TB) *arvados.Cluster {
	cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
	cluster, err := cfg.GetCluster("")
	cluster.SystemRootToken = arvadostest.SystemRootToken
	cluster.ManagementToken = arvadostest.ManagementToken
	cluster.Collections.BlobSigning = false
var _ = check.Suite(&HandlerSuite{})

// HandlerSuite holds the per-test cluster configuration and the
// handler under test, rebuilt by SetUpTest before each test.
type HandlerSuite struct {
	cluster *arvados.Cluster
// SetUpTest resets the suite state before each test: a fresh test
// cluster with two mock volumes, and a fresh (not yet set up) handler.
func (s *HandlerSuite) SetUpTest(c *check.C) {
	s.cluster = testCluster(c)
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
	s.handler = &handler{}
// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test. (IssueRequest and
// IssueHealthCheckRequest read its method, uri, apiToken, and
// requestBody fields.)
type RequestTester struct {
// Test GetBlockHandler on the following situations:
// - permissions off, unauthenticated request, unsigned locator
// - permissions on, authenticated request, signed locator
// - permissions on, authenticated request, unsigned locator
// - permissions on, unauthenticated request, signed locator
// - permissions on, authenticated request, expired locator
// - permissions on, authenticated request, signed locator, transient error from backend
func (s *HandlerSuite) TestGetHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Seed one block directly on a writable mock volume so the GET
	// requests below have something to retrieve.
	vols := s.handler.volmgr.AllWritable()
	err := vols[0].Put(context.Background(), TestHash, TestBlock)
	c.Check(err, check.IsNil)
	// Create locators for testing.
	// Turn on permission settings so we can generate signed locators.
	s.cluster.Collections.BlobSigning = true
	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")
	unsignedLocator = "/" + TestHash
	validTimestamp = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
	expiredTimestamp = time.Now().Add(-time.Hour)
	signedLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
	expiredLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
	// Test unauthenticated request with permissions off.
	s.cluster.Collections.BlobSigning = false
	// Unauthenticated request, unsigned locator
	response := IssueRequest(s.handler,
		uri: unsignedLocator,
		"Unauthenticated request, unsigned locator", http.StatusOK, response)
		"Unauthenticated request, unsigned locator",
	// The response must advertise the block's exact length.
	receivedLen := response.Header().Get("Content-Length")
	expectedLen := fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
	s.cluster.Collections.BlobSigning = true
	// Authenticated request, signed locator
	response = IssueRequest(s.handler, &RequestTester{
		apiToken: knownToken,
		"Authenticated request, signed locator", http.StatusOK, response)
		"Authenticated request, signed locator", string(TestBlock), response)
	receivedLen = response.Header().Get("Content-Length")
	expectedLen = fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
	// Authenticated request, unsigned locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		uri: unsignedLocator,
		apiToken: knownToken,
	ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
	// Unauthenticated request, signed locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		"Unauthenticated request, signed locator",
		PermissionError.HTTPCode, response)
	// Authenticated request, expired locator
	response = IssueRequest(s.handler, &RequestTester{
		apiToken: knownToken,
		"Authenticated request, expired locator",
		ExpiredError.HTTPCode, response)
	// Authenticated request, signed locator
	// => 503 Server busy (transient error)
	// Set up the block owning volume to respond with errors
	vols[0].Volume.(*MockVolume).Bad = true
	vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
	response = IssueRequest(s.handler, &RequestTester{
		apiToken: knownToken,
	// A transient error from one volume while the other doesn't find the block
	// should make the service return a 503 so that clients can retry.
		"Volume backend busy",
// Test PutBlockHandler on the following situations:
// - with server key, authenticated request, unsigned locator
// - with server key, unauthenticated request, unsigned locator
func (s *HandlerSuite) TestPutHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// No server key: the returned locator cannot be signed.
	s.cluster.Collections.BlobSigningKey = ""
	// Unauthenticated request, no server key
	// => OK (unsigned response)
	unsignedLocator := "/" + TestHash
	response := IssueRequest(s.handler,
		uri: unsignedLocator,
		requestBody: TestBlock,
		"Unauthenticated request, no server key", http.StatusOK, response)
		"Unauthenticated request, no server key",
		TestHashPutResp, response)
	// ------------------
	// With a server key.
	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")
	// When a permission key is available, the locator returned
	// from an authenticated PUT request will be signed.
	// Authenticated PUT, signed locator
	// => OK (signed response)
	response = IssueRequest(s.handler,
		uri: unsignedLocator,
		requestBody: TestBlock,
		apiToken: knownToken,
		"Authenticated PUT, signed locator, with server key",
		http.StatusOK, response)
	responseLocator := strings.TrimSpace(response.Body.String())
	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
			"response '%s' does not contain a valid signature",
	// Unauthenticated PUT, unsigned locator
	response = IssueRequest(s.handler,
		uri: unsignedLocator,
		requestBody: TestBlock,
		"Unauthenticated PUT, unsigned locator, with server key",
		http.StatusOK, response)
		"Unauthenticated PUT, unsigned locator, with server key",
		TestHashPutResp, response)
// TestPutAndDeleteSkipReadonlyVolumes checks that write operations
// (PUT/DELETE) are directed only at writable volumes: the read-only
// volume's mock must record zero calls for every mutating method.
func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
	// Make volume 0 read-only; volume 1 stays writable.
	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	s.cluster.SystemRootToken = "fake-data-manager-token"
	IssueRequest(s.handler,
		requestBody: TestBlock,
	s.cluster.Collections.BlobTrash = true
	IssueRequest(s.handler,
		requestBody: TestBlock,
		apiToken: s.cluster.SystemRootToken,
	// Per-volume expected call counts after the PUT and DELETE above.
	for _, e := range []expect{
		{"zzzzz-nyw5e-000000000000000", "Get", 0},
		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
		{"zzzzz-nyw5e-000000000000000", "Put", 0},
		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
		{"zzzzz-nyw5e-111111111111111", "Get", 0},
		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
		{"zzzzz-nyw5e-111111111111111", "Put", 1},
		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
// Test TOUCH requests.
func (s *HandlerSuite) TestTouchHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Store a block and backdate its mtime by an hour, so a
	// successful TOUCH is observable as an mtime change.
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
	afterPut := time.Now()
	t, err := vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.Before(afterPut), check.Equals, true)
	// TOUCH requires the system root token; other credentials get 401.
		"touch with no credentials",
		http.StatusUnauthorized,
		IssueRequest(s.handler, &RequestTester{
		"touch with non-root credentials",
		http.StatusUnauthorized,
		IssueRequest(s.handler, &RequestTester{
			apiToken: arvadostest.ActiveTokenV2,
		"touch non-existent block",
		IssueRequest(s.handler, &RequestTester{
			uri: "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
			apiToken: s.cluster.SystemRootToken,
	beforeTouch := time.Now()
		IssueRequest(s.handler, &RequestTester{
			apiToken: s.cluster.SystemRootToken,
	// A successful TOUCH advances the block's mtime.
	t, err = vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.After(beforeTouch), check.Equals, true)
// Test /index requests:
// - unauthenticated /index request
// - unauthenticated /index/prefix request
// - authenticated /index request | non-superuser
// - authenticated /index/prefix request | non-superuser
// - authenticated /index request | superuser
// - authenticated /index/prefix request | superuser
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Include multiple blocks on different volumes, and
	// some metadata files (which should be omitted from index listings)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[1].Put(context.Background(), TestHash2, TestBlock2)
	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
	vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
	unauthenticatedReq := &RequestTester{
	authenticatedReq := &RequestTester{
		apiToken: knownToken,
	superuserReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,
	unauthPrefixReq := &RequestTester{
		uri: "/index/" + TestHash[0:3],
	authPrefixReq := &RequestTester{
		uri: "/index/" + TestHash[0:3],
		apiToken: knownToken,
	superuserPrefixReq := &RequestTester{
		uri: "/index/" + TestHash[0:3],
		apiToken: s.cluster.SystemRootToken,
	superuserNoSuchPrefixReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,
	superuserInvalidPrefixReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,
	// -------------------------------------------------------------
	// Only the superuser should be allowed to issue /index requests.
	// ---------------------------
	// BlobSigning enabled
	// This setting should not affect tests passing.
	s.cluster.Collections.BlobSigning = true
	// unauthenticated /index request
	// => UnauthorizedError
	response := IssueRequest(s.handler, unauthenticatedReq)
		"permissions on, unauthenticated request",
		UnauthorizedError.HTTPCode,
	// unauthenticated /index/prefix request
	// => UnauthorizedError
	response = IssueRequest(s.handler, unauthPrefixReq)
		"permissions on, unauthenticated /index/prefix request",
		UnauthorizedError.HTTPCode,
	// authenticated /index request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authenticatedReq)
		"permissions on, authenticated request, non-superuser",
		UnauthorizedError.HTTPCode,
	// authenticated /index/prefix request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authPrefixReq)
		"permissions on, authenticated /index/prefix request, non-superuser",
		UnauthorizedError.HTTPCode,
	// superuser /index request
	response = IssueRequest(s.handler, superuserReq)
		"permissions on, superuser request",
	// ----------------------------
	// BlobSigning disabled
	// Valid Request should still pass.
	s.cluster.Collections.BlobSigning = false
	// superuser /index request
	response = IssueRequest(s.handler, superuserReq)
		"permissions on, superuser request",
	// Index lines are "hash+size mtime"; .meta entries must not appear.
	expected := `^` + TestHash + `\+\d+ \d+\n` +
		TestHash2 + `\+\d+ \d+\n\n$`
	match, _ := regexp.MatchString(expected, response.Body.String())
		"permissions on, superuser request: expected %s, got:\n%s",
		expected, response.Body.String())
	// superuser /index/prefix request
	response = IssueRequest(s.handler, superuserPrefixReq)
		"permissions on, superuser request",
	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
	match, _ = regexp.MatchString(expected, response.Body.String())
		"permissions on, superuser /index/prefix request: expected %s, got:\n%s",
		expected, response.Body.String())
	// superuser /index/{no-such-prefix} request
	// An empty index is just the terminating newline.
	response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
		"permissions on, superuser request",
	if "\n" != response.Body.String() {
		c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
	// superuser /index/{invalid-prefix} request
	// => StatusBadRequest
	response = IssueRequest(s.handler, superuserInvalidPrefixReq)
		"permissions on, superuser request",
		http.StatusBadRequest,
// With no token and with a non-data-manager token:
// * Delete existing block
// (test for 403 Forbidden, confirm block not deleted)
//
// With data manager token:
//
// * Delete existing block
// (test for 200 OK, response counts, confirm block deleted)
//
// * Delete nonexistent block
// (test for 200 OK, response counts)
//
// * Delete block on read-only and read-write volume
// (test for 200 OK, response with copies_deleted=1,
// copies_failed=1, confirm block deleted only on r/w volume)
//
// * Delete block on read-only volume only
// (test for 200 OK, response with copies_deleted=0, copies_failed=1,
// confirm block not deleted)
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	// Explicitly set the BlobSigningTTL to 0 for these
	// tests, to ensure the MockVolume deletes the blocks
	// even though they have just been created.
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
	var userToken = "NOT DATA MANAGER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
	s.cluster.Collections.BlobTrash = true
	unauthReq := &RequestTester{
	userReq := &RequestTester{
	superuserExistingBlockReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,
	superuserNonexistentBlockReq := &RequestTester{
		uri: "/" + TestHash2,
		apiToken: s.cluster.SystemRootToken,
	// Unauthenticated request returns PermissionError.
	var response *httptest.ResponseRecorder
	response = IssueRequest(s.handler, unauthReq)
		"unauthenticated request",
		PermissionError.HTTPCode,
	// Authenticated non-admin request returns PermissionError.
	response = IssueRequest(s.handler, userReq)
		"authenticated non-admin request",
		PermissionError.HTTPCode,
	// Authenticated admin request for nonexistent block.
	// deletecounter mirrors the handler's JSON response body.
	type deletecounter struct {
		Deleted int `json:"copies_deleted"`
		Failed int `json:"copies_failed"`
	var responseDc, expectedDc deletecounter
	response = IssueRequest(s.handler, superuserNonexistentBlockReq)
		"data manager request, nonexistent block",
	// Authenticated admin request for existing block while BlobTrash is false.
	s.cluster.Collections.BlobTrash = false
	response = IssueRequest(s.handler, superuserExistingBlockReq)
		"authenticated request, existing block, method disabled",
		MethodDisabledError.HTTPCode,
	s.cluster.Collections.BlobTrash = true
	// Authenticated admin request for existing block.
	response = IssueRequest(s.handler, superuserExistingBlockReq)
		"data manager request, existing block",
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	// Confirm the block has been deleted
	buf := make([]byte, BlockSize)
	_, err := vols[0].Get(context.Background(), TestHash, buf)
	var blockDeleted = os.IsNotExist(err)
		c.Error("superuserExistingBlockReq: block not deleted")
	// A DELETE request on a block newer than BlobSigningTTL
	// should return success but leave the block on the volume.
	vols[0].Put(context.Background(), TestHash, TestBlock)
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
	response = IssueRequest(s.handler, superuserExistingBlockReq)
		"data manager request, existing block",
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	// Confirm the block has NOT been deleted.
	_, err = vols[0].Get(context.Background(), TestHash, buf)
		c.Errorf("testing delete on new block: %s\n", err)
// Test handling of the PUT /pull statement.
//
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
// 1. Valid pull list from an ordinary user
// (expected result: 401 Unauthorized)
//
// 2. Invalid pull request from an ordinary user
// (expected result: 401 Unauthorized)
//
// 3. Valid pull request from the data manager
// (expected result: 200 OK with request body "Received 3 pull
// requests")
//
// 4. Invalid pull request from the data manager
// (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
func (s *HandlerSuite) TestPullHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Replace the router's pullq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	pullq := NewWorkQueue()
	s.handler.Handler.(*router).pullq = pullq
	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
	goodJSON := []byte(`[
		"locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
		"locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
		"locator":"cccccccccccccccccccccccccccccccc+12345",
		"servers":["http://server1"]
	badJSON := []byte(`{ "key":"I'm a little teapot" }`)
	type pullTest struct {
	var testcases = []pullTest{
		"Valid pull list from an ordinary user",
		RequestTester{"/pull", userToken, "PUT", goodJSON},
		http.StatusUnauthorized,
		"Invalid pull request from an ordinary user",
		RequestTester{"/pull", userToken, "PUT", badJSON},
		http.StatusUnauthorized,
		"Valid pull request from the data manager",
		RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON},
		"Received 3 pull requests\n",
		"Invalid pull request from the data manager",
		RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON},
		http.StatusBadRequest,
	for _, tst := range testcases {
		response := IssueRequest(s.handler, &tst.req)
		ExpectStatusCode(c, tst.name, tst.responseCode, response)
		ExpectBody(c, tst.name, tst.responseBody, response)
	// The Keep pull manager should have received one good list with 3
	for i := 0; i < 3; i++ {
		case item = <-pullq.NextItem:
		case <-time.After(time.Second):
		if _, ok := item.(PullRequest); !ok {
			c.Errorf("item %v could not be parsed as a PullRequest", item)
	expectChannelEmpty(c, pullq.NextItem)
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
// 1. Valid trash list from an ordinary user
// (expected result: 401 Unauthorized)
//
// 2. Invalid trash list from an ordinary user
// (expected result: 401 Unauthorized)
//
// 3. Valid trash list from the data manager
// (expected result: 200 OK with request body "Received 3 trash
// requests")
//
// 4. Invalid trash list from the data manager
// (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good
// trash list with the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously. Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Replace the router's trashq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	trashq := NewWorkQueue()
	s.handler.Handler.(*router).trashq = trashq
	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
	goodJSON := []byte(`[
		"block_mtime":1409082153
		"block_mtime":1409082153
		"block_mtime":1409082153
	badJSON := []byte(`I am not a valid JSON string`)
	type trashTest struct {
	var testcases = []trashTest{
		"Valid trash list from an ordinary user",
		RequestTester{"/trash", userToken, "PUT", goodJSON},
		http.StatusUnauthorized,
		"Invalid trash list from an ordinary user",
		RequestTester{"/trash", userToken, "PUT", badJSON},
		http.StatusUnauthorized,
		"Valid trash list from the data manager",
		RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON},
		"Received 3 trash requests\n",
		"Invalid trash list from the data manager",
		RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON},
		http.StatusBadRequest,
	for _, tst := range testcases {
		response := IssueRequest(s.handler, &tst.req)
		ExpectStatusCode(c, tst.name, tst.responseCode, response)
		ExpectBody(c, tst.name, tst.responseBody, response)
	// The trash collector should have received one good list with 3
	for i := 0; i < 3; i++ {
		item := <-trashq.NextItem
		if _, ok := item.(TrashRequest); !ok {
			c.Errorf("item %v could not be parsed as a TrashRequest", item)
	expectChannelEmpty(c, trashq.NextItem)
911 // ====================
913 // ====================
// IssueRequest executes an HTTP request described by rt, to a
// REST router. It returns the HTTP response to the request.
// The api token, if any, is sent with the "OAuth2" scheme; compare
// IssueHealthCheckRequest, which uses "Bearer".
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
	handler.ServeHTTP(response, req)
// IssueHealthCheckRequest is like IssueRequest, except the api token
// (if any) is sent with the "Bearer" authorization scheme, as used by
// the management/health endpoints.
func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "Bearer "+rt.apiToken)
	handler.ServeHTTP(response, req)
// ExpectStatusCode checks whether a response has the specified status code,
// and reports a test failure if not.
func ExpectStatusCode(
	response *httptest.ResponseRecorder) {
	c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
	response *httptest.ResponseRecorder) {
	// An empty expectedBody means "don't check the body".
	if expectedBody != "" && response.Body.String() != expectedBody {
		c.Errorf("%s: expected response body '%s', got %+v",
			testname, expectedBody, response)
// TestPutNeedsOnlyOneBuffer checks that a PUT completes when the
// buffer pool is limited to one buffer -- i.e., handling a single PUT
// must never require two buffers at once, which would deadlock here.
func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Restore the original buffer pool when the test finishes.
	defer func(orig *bufferPool) {
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
	ok := make(chan struct{})
	for i := 0; i < 2; i++ {
		response := IssueRequest(s.handler,
			requestBody: TestBlock,
			"TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
	case <-time.After(time.Second):
		c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
// leaks.
func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	ok := make(chan bool)
	// Issue more PUTs than there are buffers; if a buffer leaked, a
	// later iteration would block forever waiting for a free buffer.
	for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
		// Unauthenticated request, no server key
		// => OK (unsigned response)
		unsignedLocator := "/" + TestHash
		response := IssueRequest(s.handler,
			uri: unsignedLocator,
			requestBody: TestBlock,
			"TestPutHandlerBufferleak", http.StatusOK, response)
			"TestPutHandlerBufferleak",
			TestHashPutResp, response)
	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
// notifyingResponseRecorder wraps httptest.ResponseRecorder so a test
// can simulate a client disconnect through the http.CloseNotifier
// interface.
type notifyingResponseRecorder struct {
	*httptest.ResponseRecorder

// CloseNotify implements http.CloseNotifier.
func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
1033 func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
1034 s.cluster.Collections.BlobSigning = false
1035 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1037 defer func(orig *bufferPool) {
1040 bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
1041 defer bufs.Put(bufs.Get(BlockSize))
1043 if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
1047 resp := ¬ifyingResponseRecorder{
1048 ResponseRecorder: httptest.NewRecorder(),
1049 closer: make(chan bool, 1),
1051 if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
1052 c.Fatal("notifyingResponseRecorder is broken")
1054 // If anyone asks, the client has disconnected.
1057 ok := make(chan struct{})
1059 req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
1060 s.handler.ServeHTTP(resp, req)
1065 case <-time.After(20 * time.Second):
1066 c.Fatal("request took >20s, close notifier must be broken")
1070 ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
1071 for i, v := range s.handler.volmgr.AllWritable() {
1072 if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
1073 c.Errorf("volume %d got %d calls, expected 0", i, calls)
// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
// leaks.
func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	vols := s.handler.volmgr.AllWritable()
	if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
	ok := make(chan bool)
	// Issue more GETs than there are buffers; if a buffer leaked, a
	// later iteration would block forever waiting for a free buffer.
	for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
		// Unauthenticated request, unsigned locator
		unsignedLocator := "/" + TestHash
		response := IssueRequest(s.handler,
			uri: unsignedLocator,
			"Unauthenticated request, unsigned locator", http.StatusOK, response)
			"Unauthenticated request, unsigned locator",
	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
// TestPutReplicationHeader checks that a successful PUT reports the
// number of stored replicas in the X-Keep-Replicas-Stored header.
func (s *HandlerSuite) TestPutReplicationHeader(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	resp := IssueRequest(s.handler, &RequestTester{
		uri: "/" + TestHash,
		requestBody: TestBlock,
	// Each mock volume has Replication: 1, so one replica is stored.
	if r := resp.Header().Get("X-Keep-Replicas-Stored"); r != "1" {
		c.Errorf("Got X-Keep-Replicas-Stored: %q, expected %q", r, "1")
// TestUntrashHandler exercises /untrash/{hash}: only the system root
// token may untrash, the hash must be a valid locator, and only the
// PUT method is accepted.
func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Set up Keep volumes
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
	// unauthenticatedReq => UnauthorizedError
	unauthenticatedReq := &RequestTester{
		uri: "/untrash/" + TestHash,
	response := IssueRequest(s.handler, unauthenticatedReq)
		"Unauthenticated request",
		UnauthorizedError.HTTPCode,
	// notDataManagerReq => UnauthorizedError
	notDataManagerReq := &RequestTester{
		uri: "/untrash/" + TestHash,
		apiToken: knownToken,
	response = IssueRequest(s.handler, notDataManagerReq)
		"Non-datamanager token",
		UnauthorizedError.HTTPCode,
	// datamanagerWithBadHashReq => StatusBadRequest
	datamanagerWithBadHashReq := &RequestTester{
		uri: "/untrash/thisisnotalocator",
		apiToken: s.cluster.SystemRootToken,
	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
		"Bad locator in untrash request",
		http.StatusBadRequest,
	// datamanagerWrongMethodReq => StatusMethodNotAllowed
	datamanagerWrongMethodReq := &RequestTester{
		uri: "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
		"Only PUT method is supported for untrash",
		http.StatusMethodNotAllowed,
	// datamanagerReq => StatusOK
	datamanagerReq := &RequestTester{
		uri: "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	response = IssueRequest(s.handler, datamanagerReq)
	// Both mock volumes should report a successful untrash.
	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
// TestUntrashHandlerWithNoWritableVolumes checks that untrash reports
// 404 when every volume is read-only.
func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
	// Change all volumes to read-only
	for uuid, v := range s.cluster.Volumes {
		s.cluster.Volumes[uuid] = v
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// datamanagerReq => StatusNotFound (no writable volumes)
	datamanagerReq := &RequestTester{
		uri: "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	response := IssueRequest(s.handler, datamanagerReq)
		"No writable volumes",
		http.StatusNotFound,
1222 func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
1223 s.cluster.ManagementToken = arvadostest.ManagementToken
1224 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1225 pingReq := &RequestTester{
1227 uri: "/_health/ping",
1228 apiToken: arvadostest.ManagementToken,
1230 response := IssueHealthCheckRequest(s.handler, pingReq)
1235 want := `{"health":"OK"}`
1236 if !strings.Contains(response.Body.String(), want) {
1237 c.Errorf("expected response to include %s: got %s", want, response.Body.String())