// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Tests for Keep HTTP handlers:
//
// The HTTP handlers are responsible for enforcing permission policy,
// so these tests must exercise all possible permission permutations.

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"regexp"
	"sort"
	"strings"
	"time"

	"git.arvados.org/arvados.git/lib/config"
	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadostest"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"
	"github.com/prometheus/client_golang/prometheus"
	check "gopkg.in/check.v1"
)

var testServiceURL = func() arvados.URL {
	return arvados.URL{Host: "localhost:12345", Scheme: "http"}
}()
func testCluster(t TB) *arvados.Cluster {
	cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
	if err != nil {
		t.Fatal(err)
	}
	cluster, err := cfg.GetCluster("")
	if err != nil {
		t.Fatal(err)
	}
	cluster.SystemRootToken = arvadostest.SystemRootToken
	cluster.ManagementToken = arvadostest.ManagementToken
	cluster.Collections.BlobSigning = false
	return cluster
}

var _ = check.Suite(&HandlerSuite{})

type HandlerSuite struct {
	cluster *arvados.Cluster
	handler *handler
}

func (s *HandlerSuite) SetUpTest(c *check.C) {
	s.cluster = testCluster(c)
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
	}
	s.handler = &handler{}
}
// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
type RequestTester struct {
	uri            string
	apiToken       string
	method         string
	requestBody    []byte
	storageClasses string
}
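
// A quick sketch of how RequestTester is used together with the IssueRequest,
// ExpectStatusCode, and ExpectBody helpers defined at the bottom of this file
// (illustrative only; TestHash, TestBlock, and knownToken are the fixtures
// used throughout these tests):
//
//	response := IssueRequest(s.handler, &RequestTester{
//		method:   "GET",
//		uri:      "/" + TestHash,
//		apiToken: knownToken,
//	})
//	ExpectStatusCode(c, "example GET", http.StatusOK, response)
//	ExpectBody(c, "example GET", string(TestBlock), response)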
// Test GetBlockHandler in the following situations:
// - permissions off, unauthenticated request, unsigned locator
// - permissions on, authenticated request, signed locator
// - permissions on, authenticated request, unsigned locator
// - permissions on, unauthenticated request, signed locator
// - permissions on, authenticated request, expired locator
// - permissions on, authenticated request, signed locator, transient error from backend
func (s *HandlerSuite) TestGetHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	err := vols[0].Put(context.Background(), TestHash, TestBlock)
	c.Check(err, check.IsNil)

	// Create locators for testing.
	// Turn on permission settings so we can generate signed locators.
	s.cluster.Collections.BlobSigning = true
	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	var (
		unsignedLocator  = "/" + TestHash
		validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
		expiredTimestamp = time.Now().Add(-time.Hour)
		signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
		expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
	)
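
	// For reference: an unsigned locator is just "/<md5 hash>", while a
	// signed locator carries the permission hint that SignLocator appends,
	// roughly "/<md5 hash>+A<hex signature>@<hex expiry timestamp>" (the
	// exact hint format is whatever SignLocator produces; it is sketched
	// here only to make the cases below easier to follow).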
	// Test unauthenticated request with permissions off.
	s.cluster.Collections.BlobSigning = false

	// Unauthenticated request, unsigned locator
	response := IssueRequest(s.handler,
		uri: unsignedLocator,
		"Unauthenticated request, unsigned locator", http.StatusOK, response)
		"Unauthenticated request, unsigned locator",

	receivedLen := response.Header().Get("Content-Length")
	expectedLen := fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)

	s.cluster.Collections.BlobSigning = true

	// Authenticated request, signed locator
	response = IssueRequest(s.handler, &RequestTester{
		apiToken: knownToken,
		"Authenticated request, signed locator", http.StatusOK, response)
		"Authenticated request, signed locator", string(TestBlock), response)

	receivedLen = response.Header().Get("Content-Length")
	expectedLen = fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)

	// Authenticated request, unsigned locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		uri:      unsignedLocator,
		apiToken: knownToken,
	ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)

	// Unauthenticated request, signed locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		"Unauthenticated request, signed locator",
		PermissionError.HTTPCode, response)

	// Authenticated request, expired locator
	response = IssueRequest(s.handler, &RequestTester{
		apiToken: knownToken,
		"Authenticated request, expired locator",
		ExpiredError.HTTPCode, response)

	// Authenticated request, signed locator
	// => 503 Server busy (transient error)

	// Set up the block owning volume to respond with errors
	vols[0].Volume.(*MockVolume).Bad = true
	vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
	response = IssueRequest(s.handler, &RequestTester{
		apiToken: knownToken,
	// A transient error from one volume while the other doesn't find the block
	// should make the service return a 503 so that clients can retry.
		"Volume backend busy",
// Test PutBlockHandler in the following situations:
// - with server key, authenticated request, unsigned locator
// - with server key, unauthenticated request, unsigned locator
func (s *HandlerSuite) TestPutHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	s.cluster.Collections.BlobSigningKey = ""

	// Unauthenticated request, no server key
	// => OK (unsigned response)
	unsignedLocator := "/" + TestHash
	response := IssueRequest(s.handler,
		uri:         unsignedLocator,
		requestBody: TestBlock,
		"Unauthenticated request, no server key", http.StatusOK, response)
		"Unauthenticated request, no server key",
		TestHashPutResp, response)

	// ------------------
	// With a server key.

	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	// When a permission key is available, the locator returned
	// from an authenticated PUT request will be signed.

	// Authenticated PUT, signed locator
	// => OK (signed response)
	response = IssueRequest(s.handler,
		uri:         unsignedLocator,
		requestBody: TestBlock,
		apiToken:    knownToken,
		"Authenticated PUT, signed locator, with server key",
		http.StatusOK, response)
	responseLocator := strings.TrimSpace(response.Body.String())
	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
			"response '%s' does not contain a valid signature",

	// Unauthenticated PUT, unsigned locator
	response = IssueRequest(s.handler,
		uri:         unsignedLocator,
		requestBody: TestBlock,
		"Unauthenticated PUT, unsigned locator, with server key",
		http.StatusOK, response)
		"Unauthenticated PUT, unsigned locator, with server key",
		TestHashPutResp, response)
func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	s.cluster.SystemRootToken = "fake-data-manager-token"
	IssueRequest(s.handler,
		requestBody: TestBlock,

	s.cluster.Collections.BlobTrash = true
	IssueRequest(s.handler,
		requestBody: TestBlock,
		apiToken:    s.cluster.SystemRootToken,

	type expect struct {
		volid     string
		method    string
		callcount int
	}
	for _, e := range []expect{
		{"zzzzz-nyw5e-000000000000000", "Get", 0},
		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
		{"zzzzz-nyw5e-000000000000000", "Put", 0},
		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
		{"zzzzz-nyw5e-111111111111111", "Get", 0},
		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
		{"zzzzz-nyw5e-111111111111111", "Put", 1},
		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
	} {
		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
// Test TOUCH requests.
func (s *HandlerSuite) TestTouchHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
	afterPut := time.Now()
	t, err := vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.Before(afterPut), check.Equals, true)

		"touch with no credentials",
		http.StatusUnauthorized,
		IssueRequest(s.handler, &RequestTester{

		"touch with non-root credentials",
		http.StatusUnauthorized,
		IssueRequest(s.handler, &RequestTester{
			apiToken: arvadostest.ActiveTokenV2,

		"touch non-existent block",
		IssueRequest(s.handler, &RequestTester{
			uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
			apiToken: s.cluster.SystemRootToken,

	beforeTouch := time.Now()

		IssueRequest(s.handler, &RequestTester{
			apiToken: s.cluster.SystemRootToken,
	t, err = vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.After(beforeTouch), check.Equals, true)
// Test /index requests:
// - unauthenticated /index request
// - unauthenticated /index/prefix request
// - authenticated /index request | non-superuser
// - authenticated /index/prefix request | non-superuser
// - authenticated /index request | superuser
// - authenticated /index/prefix request | superuser
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
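//
// For orientation, a successful /index response is a plain-text listing with
// one line per stored block, of the form
//
//	<md5 hash>+<size> <timestamp>
//
// terminated by an empty line (this is the shape the regexps further down in
// this test match against; the timestamp is treated here as an opaque number).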
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Include multiple blocks on different volumes, and
	// some metadata files (which should be omitted from index listings).
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[1].Put(context.Background(), TestHash2, TestBlock2)
	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
	vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	unauthenticatedReq := &RequestTester{
	authenticatedReq := &RequestTester{
		apiToken: knownToken,
	superuserReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,
	unauthPrefixReq := &RequestTester{
		uri: "/index/" + TestHash[0:3],
	authPrefixReq := &RequestTester{
		uri:      "/index/" + TestHash[0:3],
		apiToken: knownToken,
	superuserPrefixReq := &RequestTester{
		uri:      "/index/" + TestHash[0:3],
		apiToken: s.cluster.SystemRootToken,
	superuserNoSuchPrefixReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,
	superuserInvalidPrefixReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,

	// -------------------------------------------------------------
	// Only the superuser should be allowed to issue /index requests.

	// ---------------------------
	// BlobSigning enabled
	// This setting should not affect whether the tests pass.
	s.cluster.Collections.BlobSigning = true

	// unauthenticated /index request
	// => UnauthorizedError
	response := IssueRequest(s.handler, unauthenticatedReq)
		"permissions on, unauthenticated request",
		UnauthorizedError.HTTPCode,

	// unauthenticated /index/prefix request
	// => UnauthorizedError
	response = IssueRequest(s.handler, unauthPrefixReq)
		"permissions on, unauthenticated /index/prefix request",
		UnauthorizedError.HTTPCode,

	// authenticated /index request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authenticatedReq)
		"permissions on, authenticated request, non-superuser",
		UnauthorizedError.HTTPCode,

	// authenticated /index/prefix request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authPrefixReq)
		"permissions on, authenticated /index/prefix request, non-superuser",
		UnauthorizedError.HTTPCode,

	// superuser /index request
	response = IssueRequest(s.handler, superuserReq)
		"permissions on, superuser request",

	// ----------------------------
	// BlobSigning disabled
	// A valid request should still pass.
	s.cluster.Collections.BlobSigning = false

	// superuser /index request
	response = IssueRequest(s.handler, superuserReq)
		"permissions on, superuser request",

	expected := `^` + TestHash + `\+\d+ \d+\n` +
		TestHash2 + `\+\d+ \d+\n\n$`
	match, _ := regexp.MatchString(expected, response.Body.String())
		"permissions on, superuser request: expected %s, got:\n%s",
		expected, response.Body.String())

	// superuser /index/prefix request
	response = IssueRequest(s.handler, superuserPrefixReq)
		"permissions on, superuser request",

	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
	match, _ = regexp.MatchString(expected, response.Body.String())
		"permissions on, superuser /index/prefix request: expected %s, got:\n%s",
		expected, response.Body.String())

	// superuser /index/{no-such-prefix} request
	response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
		"permissions on, superuser request",

	if response.Body.String() != "\n" {
		c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())

	// superuser /index/{invalid-prefix} request
	// => StatusBadRequest
	response = IssueRequest(s.handler, superuserInvalidPrefixReq)
		"permissions on, superuser request",
		http.StatusBadRequest,
// With no token and with a non-data-manager token:
// * Delete existing block
//   (test for 403 Forbidden, confirm block not deleted)
//
// With data manager token:
//
// * Delete existing block
//   (test for 200 OK, response counts, confirm block deleted)
//
// * Delete nonexistent block
//   (test for 200 OK, response counts)
//
// * Delete block on read-only and read-write volume
//   (test for 200 OK, response with copies_deleted=1,
//   copies_failed=1, confirm block deleted only on r/w volume)
//
// * Delete block on read-only volume only
//   (test for 200 OK, response with copies_deleted=0, copies_failed=1,
//   confirm block not deleted)
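//
// In each of the data-manager cases the response body is a small JSON
// document counting the copies affected, e.g. (values here are illustrative):
//
//	{"copies_deleted":1,"copies_failed":0}
//
// which is what the deletecounter struct in the test below decodes into.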
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	// Explicitly set the BlobSigningTTL to 0 for these
	// tests, to ensure the MockVolume deletes the blocks
	// even though they have just been created.
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)

	var userToken = "NOT DATA MANAGER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	s.cluster.Collections.BlobTrash = true

	unauthReq := &RequestTester{

	userReq := &RequestTester{

	superuserExistingBlockReq := &RequestTester{
		apiToken: s.cluster.SystemRootToken,

	superuserNonexistentBlockReq := &RequestTester{
		uri:      "/" + TestHash2,
		apiToken: s.cluster.SystemRootToken,

	// Unauthenticated request returns PermissionError.
	var response *httptest.ResponseRecorder
	response = IssueRequest(s.handler, unauthReq)
		"unauthenticated request",
		PermissionError.HTTPCode,

	// Authenticated non-admin request returns PermissionError.
	response = IssueRequest(s.handler, userReq)
		"authenticated non-admin request",
		PermissionError.HTTPCode,

	// Authenticated admin request for nonexistent block.
	type deletecounter struct {
		Deleted int `json:"copies_deleted"`
		Failed  int `json:"copies_failed"`
	}
	var responseDc, expectedDc deletecounter

	response = IssueRequest(s.handler, superuserNonexistentBlockReq)
		"data manager request, nonexistent block",

	// Authenticated admin request for existing block while BlobTrash is false.
	s.cluster.Collections.BlobTrash = false
	response = IssueRequest(s.handler, superuserExistingBlockReq)
		"authenticated request, existing block, method disabled",
		MethodDisabledError.HTTPCode,
	s.cluster.Collections.BlobTrash = true

	// Authenticated admin request for existing block.
	response = IssueRequest(s.handler, superuserExistingBlockReq)
		"data manager request, existing block",

	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)

	// Confirm the block has been deleted
	buf := make([]byte, BlockSize)
	_, err := vols[0].Get(context.Background(), TestHash, buf)
	var blockDeleted = os.IsNotExist(err)
		c.Error("superuserExistingBlockReq: block not deleted")

	// A DELETE request on a block newer than BlobSigningTTL
	// should return success but leave the block on the volume.
	vols[0].Put(context.Background(), TestHash, TestBlock)
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)

	response = IssueRequest(s.handler, superuserExistingBlockReq)
		"data manager request, existing block",

	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)

	// Confirm the block has NOT been deleted.
	_, err = vols[0].Get(context.Background(), TestHash, buf)
		c.Errorf("testing delete on new block: %s\n", err)
// Test handling of the PUT /pull request.
//
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
// 1. Valid pull list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 2. Invalid pull request from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 3. Valid pull request from the data manager
//    (expected result: 200 OK with request body "Received 3 pull
//    requests\n")
//
// 4. Invalid pull request from the data manager
//    (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
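//
// A rough sketch of what that concurrency test could look like, using only
// the helpers already defined in this file (illustrative only -- the
// goroutine count, error plumbing, and the reuse of goodJSON are assumptions,
// and this is not part of the suite):
//
//	func (s *HandlerSuite) concurrentPullSketch(c *check.C) {
//		var wg sync.WaitGroup
//		codes := make(chan int, 100)
//		for i := 0; i < 100; i++ {
//			wg.Add(1)
//			go func() {
//				defer wg.Done()
//				resp := IssueRequest(s.handler, &RequestTester{
//					method:      "PUT",
//					uri:         "/pull",
//					apiToken:    s.cluster.SystemRootToken,
//					requestBody: goodJSON,
//				})
//				codes <- resp.Code
//			}()
//		}
//		wg.Wait()
//		close(codes)
//		for code := range codes {
//			c.Check(code, check.Equals, http.StatusOK)
//		}
//	}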
func (s *HandlerSuite) TestPullHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Replace the router's pullq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	pullq := NewWorkQueue()
	s.handler.Handler.(*router).pullq = pullq

	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	goodJSON := []byte(`[
		"locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
		"locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
		"locator":"cccccccccccccccccccccccccccccccc+12345",
		"servers":["http://server1"]

	badJSON := []byte(`{ "key":"I'm a little teapot" }`)

	type pullTest struct {
		name         string
		req          RequestTester
		responseCode int
		responseBody string
	}
	var testcases = []pullTest{
			"Valid pull list from an ordinary user",
			RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
			http.StatusUnauthorized,
			"Invalid pull request from an ordinary user",
			RequestTester{"/pull", userToken, "PUT", badJSON, ""},
			http.StatusUnauthorized,
			"Valid pull request from the data manager",
			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
			"Received 3 pull requests\n",
			"Invalid pull request from the data manager",
			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
			http.StatusBadRequest,

	for _, tst := range testcases {
		response := IssueRequest(s.handler, &tst.req)
		ExpectStatusCode(c, tst.name, tst.responseCode, response)
		ExpectBody(c, tst.name, tst.responseBody, response)

	// The Keep pull manager should have received one good list with 3
	// requests on it.
	for i := 0; i < 3; i++ {
		case item = <-pullq.NextItem:
		case <-time.After(time.Second):
		if _, ok := item.(PullRequest); !ok {
			c.Errorf("item %v could not be parsed as a PullRequest", item)

	expectChannelEmpty(c, pullq.NextItem)
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
// 1. Valid trash list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 2. Invalid trash list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 3. Valid trash list from the data manager
//    (expected result: 200 OK with request body "Received 3 trash
//    requests\n")
//
// 4. Invalid trash list from the data manager
//    (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good trash list
// with the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously. Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Replace the router's trashq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	trashq := NewWorkQueue()
	s.handler.Handler.(*router).trashq = trashq

	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	goodJSON := []byte(`[
		"block_mtime":1409082153
		"block_mtime":1409082153
		"block_mtime":1409082153

	badJSON := []byte(`I am not a valid JSON string`)

	type trashTest struct {
		name         string
		req          RequestTester
		responseCode int
		responseBody string
	}
	var testcases = []trashTest{
			"Valid trash list from an ordinary user",
			RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
			http.StatusUnauthorized,
			"Invalid trash list from an ordinary user",
			RequestTester{"/trash", userToken, "PUT", badJSON, ""},
			http.StatusUnauthorized,
			"Valid trash list from the data manager",
			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
			"Received 3 trash requests\n",
			"Invalid trash list from the data manager",
			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
			http.StatusBadRequest,

	for _, tst := range testcases {
		response := IssueRequest(s.handler, &tst.req)
		ExpectStatusCode(c, tst.name, tst.responseCode, response)
		ExpectBody(c, tst.name, tst.responseBody, response)

	// The trash collector should have received one good list with 3
	// requests on it.
	for i := 0; i < 3; i++ {
		item := <-trashq.NextItem
		if _, ok := item.(TrashRequest); !ok {
			c.Errorf("item %v could not be parsed as a TrashRequest", item)

	expectChannelEmpty(c, trashq.NextItem)
// ====================
// Helper functions
// ====================

// IssueRequest executes an HTTP request described by rt against a
// REST router, and returns the recorded HTTP response.
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
	}
	if rt.storageClasses != "" {
		req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
	}
	handler.ServeHTTP(response, req)
	return response
}

func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "Bearer "+rt.apiToken)
	}
	handler.ServeHTTP(response, req)
	return response
}

// ExpectStatusCode checks whether a response has the specified status code,
// and reports a test failure if not.
func ExpectStatusCode(
	c *check.C,
	testname string,
	expectedStatus int,
	response *httptest.ResponseRecorder) {
	c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
}

// ExpectBody checks whether a response body matches the expected string,
// and reports a test failure if not.
func ExpectBody(
	c *check.C,
	testname string,
	expectedBody string,
	response *httptest.ResponseRecorder) {
	if expectedBody != "" && response.Body.String() != expectedBody {
		c.Errorf("%s: expected response body '%s', got %+v",
			testname, expectedBody, response)
	}
}
func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	defer func(orig *bufferPool) {
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)

	ok := make(chan struct{})
		for i := 0; i < 2; i++ {
			response := IssueRequest(s.handler,
				requestBody: TestBlock,
				"TestPutNeedsOnlyOneBuffer", http.StatusOK, response)

	case <-time.After(time.Second):
		c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")

// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
// leaks.
func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	ok := make(chan bool)
		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
			// Unauthenticated request, no server key
			// => OK (unsigned response)
			unsignedLocator := "/" + TestHash
			response := IssueRequest(s.handler,
				uri:         unsignedLocator,
				requestBody: TestBlock,
				"TestPutHandlerBufferleak", http.StatusOK, response)
				"TestPutHandlerBufferleak",
				TestHashPutResp, response)

	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
type notifyingResponseRecorder struct {
	*httptest.ResponseRecorder
	closer chan bool
}

func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
	return r.closer
}

func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
	s.cluster.Collections.BlobSigning = false
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	defer func(orig *bufferPool) {
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
	defer bufs.Put(bufs.Get(BlockSize))

	if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {

	resp := &notifyingResponseRecorder{
		ResponseRecorder: httptest.NewRecorder(),
		closer:           make(chan bool, 1),
	}
	if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
		c.Fatal("notifyingResponseRecorder is broken")
	}
	// If anyone asks, the client has disconnected.

	ok := make(chan struct{})
		req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
		s.handler.ServeHTTP(resp, req)

	case <-time.After(20 * time.Second):
		c.Fatal("request took >20s, close notifier must be broken")

	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
	for i, v := range s.handler.volmgr.AllWritable() {
		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
			c.Errorf("volume %d got %d calls, expected 0", i, calls)
// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
// leaks.
func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {

	ok := make(chan bool)
		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
			// Unauthenticated request, unsigned locator
			unsignedLocator := "/" + TestHash
			response := IssueRequest(s.handler,
				uri: unsignedLocator,
				"Unauthenticated request, unsigned locator", http.StatusOK, response)
				"Unauthenticated request, unsigned locator",

	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	rt := RequestTester{
		uri:         "/" + TestHash,
		requestBody: TestBlock,

	for _, trial := range []struct {
		ask    string
		expect string
	}{
		{"default", "default=1"},
		{" , default , default , ", "default=1"},
		{"special", "extra=1, special=1"},
		{"special, readonly", "extra=1, special=1"},
		{"special, nonexistent", "extra=1, special=1"},
		{"extra, special", "extra=1, special=1"},
		{"default, special", "default=1, extra=1, special=1"},
	} {
		c.Logf("success case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		if trial.expect == "" {
			// any non-empty value is correct
			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)

	for _, trial := range []struct {
		ask string
	}{
		{"doesnotexist, readonly"},
	} {
		c.Logf("failure case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)

func sortCommaSeparated(s string) string {
	slice := strings.Split(s, ", ")
	sort.Strings(slice)
	return strings.Join(slice, ", ")
}
func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	resp := IssueRequest(s.handler, &RequestTester{
		uri:         "/" + TestHash,
		requestBody: TestBlock,

	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Set up Keep volumes
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	// unauthenticatedReq => UnauthorizedError
	unauthenticatedReq := &RequestTester{
		uri: "/untrash/" + TestHash,
	response := IssueRequest(s.handler, unauthenticatedReq)
		"Unauthenticated request",
		UnauthorizedError.HTTPCode,

	// notDataManagerReq => UnauthorizedError
	notDataManagerReq := &RequestTester{
		uri:      "/untrash/" + TestHash,
		apiToken: knownToken,

	response = IssueRequest(s.handler, notDataManagerReq)
		"Non-datamanager token",
		UnauthorizedError.HTTPCode,

	// datamanagerWithBadHashReq => StatusBadRequest
	datamanagerWithBadHashReq := &RequestTester{
		uri:      "/untrash/thisisnotalocator",
		apiToken: s.cluster.SystemRootToken,
	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
		"Bad locator in untrash request",
		http.StatusBadRequest,

	// datamanagerWrongMethodReq => StatusMethodNotAllowed
	datamanagerWrongMethodReq := &RequestTester{
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
		"Only PUT method is supported for untrash",
		http.StatusMethodNotAllowed,

	// datamanagerReq => StatusOK
	datamanagerReq := &RequestTester{
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	response = IssueRequest(s.handler, datamanagerReq)

	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
	// Change all volumes to read-only
	for uuid, v := range s.cluster.Volumes {
		v.ReadOnly = true
		s.cluster.Volumes[uuid] = v
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// datamanagerReq => StatusNotFound (no writable volumes)
	datamanagerReq := &RequestTester{
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	response := IssueRequest(s.handler, datamanagerReq)
		"No writable volumes",
		http.StatusNotFound,
func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
	s.cluster.ManagementToken = arvadostest.ManagementToken
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	pingReq := &RequestTester{
		uri:      "/_health/ping",
		apiToken: arvadostest.ManagementToken,
	response := IssueHealthCheckRequest(s.handler, pingReq)

	want := `{"health":"OK"}`
	if !strings.Contains(response.Body.String(), want) {
		c.Errorf("expected response to include %s: got %s", want, response.Body.String())