1 // Copyright (C) The Arvados Authors. All rights reserved.
3 // SPDX-License-Identifier: AGPL-3.0
5 // Tests for Keep HTTP handlers:
11 // The HTTP handlers are responsible for enforcing permission policy,
12 // so these tests must exercise all possible permission permutations.
28 "git.arvados.org/arvados.git/lib/config"
29 "git.arvados.org/arvados.git/sdk/go/arvados"
30 "git.arvados.org/arvados.git/sdk/go/arvadostest"
31 "git.arvados.org/arvados.git/sdk/go/ctxlog"
32 "github.com/prometheus/client_golang/prometheus"
33 check "gopkg.in/check.v1"
// testServiceURL supplies a fixed keepstore service endpoint for
// handler setup in these tests (nothing actually listens there).
36 var testServiceURL = func() arvados.URL {
37 return arvados.URL{Host: "localhost:12345", Scheme: "http"}
// testCluster loads a minimal single-cluster ("zzzzz") config, then
// installs the well-known test tokens and disables blob signing so
// that individual tests can opt back in as needed.
40 func testCluster(t TB) *arvados.Cluster {
41 cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
45 cluster, err := cfg.GetCluster("")
49 cluster.SystemRootToken = arvadostest.SystemRootToken
50 cluster.ManagementToken = arvadostest.ManagementToken
51 cluster.Collections.BlobSigning = false
// Register HandlerSuite with gocheck.
55 var _ = check.Suite(&HandlerSuite{})
// HandlerSuite exercises the keepstore HTTP handlers against a test
// cluster backed by mock volumes (configured in SetUpTest).
57 type HandlerSuite struct {
58 cluster *arvados.Cluster
// SetUpTest gives every test a fresh cluster config with two writable
// mock volumes and a fresh, not-yet-set-up handler. Tests call
// s.handler.setup(...) themselves after tweaking the config.
62 func (s *HandlerSuite) SetUpTest(c *check.C) {
63 s.cluster = testCluster(c)
64 s.cluster.Volumes = map[string]arvados.Volume{
65 "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
66 "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
68 s.handler = &handler{}
71 // A RequestTester represents the parameters for an HTTP request to
72 // be issued on behalf of a unit test.
// (Its fields — method, uri, apiToken, requestBody, storageClasses —
// are consumed by IssueRequest/IssueHealthCheckRequest below; the
// field declarations themselves are elided from this excerpt.)
73 type RequestTester struct {
81 // Test GetBlockHandler on the following situations:
82 // - permissions off, unauthenticated request, unsigned locator
83 // - permissions on, authenticated request, signed locator
84 // - permissions on, authenticated request, unsigned locator
85 // - permissions on, unauthenticated request, signed locator
86 // - permissions on, authenticated request, expired locator
87 // - permissions on, authenticated request, signed locator, transient error from backend
89 func (s *HandlerSuite) TestGetHandler(c *check.C) {
90 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Seed a block directly on the first writable mock volume so GET has
// something to find.
92 vols := s.handler.volmgr.AllWritable()
93 err := vols[0].Put(context.Background(), TestHash, TestBlock)
94 c.Check(err, check.IsNil)
96 // Create locators for testing.
97 // Turn on permission settings so we can generate signed locators.
98 s.cluster.Collections.BlobSigning = true
99 s.cluster.Collections.BlobSigningKey = knownKey
100 s.cluster.Collections.BlobSigningTTL.Set("5m")
103 unsignedLocator = "/" + TestHash
104 validTimestamp = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
105 expiredTimestamp = time.Now().Add(-time.Hour)
106 signedLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
107 expiredLocator = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
111 // Test unauthenticated request with permissions off.
112 s.cluster.Collections.BlobSigning = false
114 // Unauthenticated request, unsigned locator
116 response := IssueRequest(s.handler,
119 uri: unsignedLocator,
122 "Unauthenticated request, unsigned locator", http.StatusOK, response)
124 "Unauthenticated request, unsigned locator",
// The Content-Length header must match the stored block's size.
128 receivedLen := response.Header().Get("Content-Length")
129 expectedLen := fmt.Sprintf("%d", len(TestBlock))
130 if receivedLen != expectedLen {
131 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
136 s.cluster.Collections.BlobSigning = true
138 // Authenticated request, signed locator
140 response = IssueRequest(s.handler, &RequestTester{
143 apiToken: knownToken,
146 "Authenticated request, signed locator", http.StatusOK, response)
148 "Authenticated request, signed locator", string(TestBlock), response)
150 receivedLen = response.Header().Get("Content-Length")
151 expectedLen = fmt.Sprintf("%d", len(TestBlock))
152 if receivedLen != expectedLen {
153 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
156 // Authenticated request, unsigned locator
157 // => PermissionError
158 response = IssueRequest(s.handler, &RequestTester{
160 uri: unsignedLocator,
161 apiToken: knownToken,
163 ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
165 // Unauthenticated request, signed locator
166 // => PermissionError
167 response = IssueRequest(s.handler, &RequestTester{
172 "Unauthenticated request, signed locator",
173 PermissionError.HTTPCode, response)
175 // Authenticated request, expired locator
177 response = IssueRequest(s.handler, &RequestTester{
180 apiToken: knownToken,
183 "Authenticated request, expired locator",
184 ExpiredError.HTTPCode, response)
186 // Authenticated request, signed locator
187 // => 503 Server busy (transient error)
189 // Set up the block owning volume to respond with errors
190 vols[0].Volume.(*MockVolume).Bad = true
191 vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
192 response = IssueRequest(s.handler, &RequestTester{
195 apiToken: knownToken,
197 // A transient error from one volume while the other doesn't find the block
198 // should make the service return a 503 so that clients can retry.
200 "Volume backend busy",
204 // Test PutBlockHandler on the following situations:
206 // - with server key, authenticated request, unsigned locator
207 // - with server key, unauthenticated request, unsigned locator
209 func (s *HandlerSuite) TestPutHandler(c *check.C) {
210 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// No signing key yet: the PUT response locator will be unsigned.
215 s.cluster.Collections.BlobSigningKey = ""
217 // Unauthenticated request, no server key
218 // => OK (unsigned response)
219 unsignedLocator := "/" + TestHash
220 response := IssueRequest(s.handler,
223 uri: unsignedLocator,
224 requestBody: TestBlock,
228 "Unauthenticated request, no server key", http.StatusOK, response)
230 "Unauthenticated request, no server key",
231 TestHashPutResp, response)
233 // ------------------
234 // With a server key.
236 s.cluster.Collections.BlobSigningKey = knownKey
237 s.cluster.Collections.BlobSigningTTL.Set("5m")
239 // When a permission key is available, the locator returned
240 // from an authenticated PUT request will be signed.
242 // Authenticated PUT, signed locator
243 // => OK (signed response)
244 response = IssueRequest(s.handler,
247 uri: unsignedLocator,
248 requestBody: TestBlock,
249 apiToken: knownToken,
253 "Authenticated PUT, signed locator, with server key",
254 http.StatusOK, response)
// The response body is the signed locator; verify its signature.
255 responseLocator := strings.TrimSpace(response.Body.String())
256 if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
257 c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
258 "response '%s' does not contain a valid signature",
262 // Unauthenticated PUT, unsigned locator
264 response = IssueRequest(s.handler,
267 uri: unsignedLocator,
268 requestBody: TestBlock,
272 "Unauthenticated PUT, unsigned locator, with server key",
273 http.StatusOK, response)
275 "Unauthenticated PUT, unsigned locator, with server key",
276 TestHashPutResp, response)
// TestPutAndDeleteSkipReadonlyVolumes: with volume 000... marked
// read-only, PUT and DELETE traffic must land only on the writable
// volume 111..., verified via MockVolume call counts below.
279 func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
280 s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
281 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
283 s.cluster.SystemRootToken = "fake-data-manager-token"
284 IssueRequest(s.handler,
288 requestBody: TestBlock,
291 s.cluster.Collections.BlobTrash = true
292 IssueRequest(s.handler,
296 requestBody: TestBlock,
297 apiToken: s.cluster.SystemRootToken,
// Expect zero operations on the read-only volume, and exactly one of
// each write-path operation on the writable one.
304 for _, e := range []expect{
305 {"zzzzz-nyw5e-000000000000000", "Get", 0},
306 {"zzzzz-nyw5e-000000000000000", "Compare", 0},
307 {"zzzzz-nyw5e-000000000000000", "Touch", 0},
308 {"zzzzz-nyw5e-000000000000000", "Put", 0},
309 {"zzzzz-nyw5e-000000000000000", "Delete", 0},
310 {"zzzzz-nyw5e-111111111111111", "Get", 0},
311 {"zzzzz-nyw5e-111111111111111", "Compare", 1},
312 {"zzzzz-nyw5e-111111111111111", "Touch", 1},
313 {"zzzzz-nyw5e-111111111111111", "Put", 1},
314 {"zzzzz-nyw5e-111111111111111", "Delete", 1},
316 if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
317 c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
// TestReadsOrderedByStorageClassPriority: GET must probe volumes in
// descending storage-class priority order (ties broken
// lexicographically by volume UUID — see the trial table comments).
322 func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
323 s.cluster.Volumes = map[string]arvados.Volume{
324 "zzzzz-nyw5e-111111111111111": {
327 StorageClasses: map[string]bool{"class1": true}},
328 "zzzzz-nyw5e-222222222222222": {
331 StorageClasses: map[string]bool{"class2": true, "class3": true}},
334 for _, trial := range []struct {
335 priority1 int // priority of class1, thus vol1
336 priority2 int // priority of class2
337 priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
338 get1 int // expected number of "get" ops on vol1
339 get2 int // expected number of "get" ops on vol2
341 {100, 50, 50, 1, 0}, // class1 has higher priority => try vol1 first, no need to try vol2
342 {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
343 {66, 99, 33, 1, 1}, // class2 has higher priority => try vol2 first, then try vol1
344 {66, 33, 99, 1, 1}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
// Each trial re-runs setup with its own priority configuration.
347 s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
348 "class1": {Priority: trial.priority1},
349 "class2": {Priority: trial.priority2},
350 "class3": {Priority: trial.priority3},
352 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
353 IssueRequest(s.handler,
357 requestBody: TestBlock,
358 storageClasses: "class1",
360 IssueRequest(s.handler,
365 c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
366 c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
370 // Test TOUCH requests.
// Only the system root token may TOUCH; a successful TOUCH must
// advance the block's mtime.
371 func (s *HandlerSuite) TestTouchHandler(c *check.C) {
372 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
373 vols := s.handler.volmgr.AllWritable()
374 vols[0].Put(context.Background(), TestHash, TestBlock)
// Backdate the block's mtime so a subsequent TOUCH is observable.
375 vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
376 afterPut := time.Now()
377 t, err := vols[0].Mtime(TestHash)
378 c.Assert(err, check.IsNil)
379 c.Assert(t.Before(afterPut), check.Equals, true)
382 "touch with no credentials",
383 http.StatusUnauthorized,
384 IssueRequest(s.handler, &RequestTester{
390 "touch with non-root credentials",
391 http.StatusUnauthorized,
392 IssueRequest(s.handler, &RequestTester{
395 apiToken: arvadostest.ActiveTokenV2,
399 "touch non-existent block",
401 IssueRequest(s.handler, &RequestTester{
403 uri: "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
404 apiToken: s.cluster.SystemRootToken,
// Record the time before the successful TOUCH so we can confirm the
// mtime was advanced past it.
407 beforeTouch := time.Now()
411 IssueRequest(s.handler, &RequestTester{
414 apiToken: s.cluster.SystemRootToken,
416 t, err = vols[0].Mtime(TestHash)
417 c.Assert(err, check.IsNil)
418 c.Assert(t.After(beforeTouch), check.Equals, true)
421 // Test /index requests:
422 // - unauthenticated /index request
423 // - unauthenticated /index/prefix request
424 // - authenticated /index request | non-superuser
425 // - authenticated /index/prefix request | non-superuser
426 // - authenticated /index request | superuser
427 // - authenticated /index/prefix request | superuser
429 // The only /index requests that should succeed are those issued by the
430 // superuser. They should pass regardless of the value of BlobSigning.
432 func (s *HandlerSuite) TestIndexHandler(c *check.C) {
433 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
435 // Include multiple blocks on different volumes, and
436 // some metadata files (which should be omitted from index listings)
437 vols := s.handler.volmgr.AllWritable()
438 vols[0].Put(context.Background(), TestHash, TestBlock)
439 vols[1].Put(context.Background(), TestHash2, TestBlock2)
440 vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
441 vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
// Only this token is treated as the superuser below.
443 s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
445 unauthenticatedReq := &RequestTester{
449 authenticatedReq := &RequestTester{
452 apiToken: knownToken,
454 superuserReq := &RequestTester{
457 apiToken: s.cluster.SystemRootToken,
459 unauthPrefixReq := &RequestTester{
461 uri: "/index/" + TestHash[0:3],
463 authPrefixReq := &RequestTester{
465 uri: "/index/" + TestHash[0:3],
466 apiToken: knownToken,
468 superuserPrefixReq := &RequestTester{
470 uri: "/index/" + TestHash[0:3],
471 apiToken: s.cluster.SystemRootToken,
473 superuserNoSuchPrefixReq := &RequestTester{
476 apiToken: s.cluster.SystemRootToken,
478 superuserInvalidPrefixReq := &RequestTester{
481 apiToken: s.cluster.SystemRootToken,
484 // -------------------------------------------------------------
485 // Only the superuser should be allowed to issue /index requests.
487 // ---------------------------
488 // BlobSigning enabled
489 // This setting should not affect tests passing.
490 s.cluster.Collections.BlobSigning = true
492 // unauthenticated /index request
493 // => UnauthorizedError
494 response := IssueRequest(s.handler, unauthenticatedReq)
496 "permissions on, unauthenticated request",
497 UnauthorizedError.HTTPCode,
500 // unauthenticated /index/prefix request
501 // => UnauthorizedError
502 response = IssueRequest(s.handler, unauthPrefixReq)
504 "permissions on, unauthenticated /index/prefix request",
505 UnauthorizedError.HTTPCode,
508 // authenticated /index request, non-superuser
509 // => UnauthorizedError
510 response = IssueRequest(s.handler, authenticatedReq)
512 "permissions on, authenticated request, non-superuser",
513 UnauthorizedError.HTTPCode,
516 // authenticated /index/prefix request, non-superuser
517 // => UnauthorizedError
518 response = IssueRequest(s.handler, authPrefixReq)
520 "permissions on, authenticated /index/prefix request, non-superuser",
521 UnauthorizedError.HTTPCode,
524 // superuser /index request
526 response = IssueRequest(s.handler, superuserReq)
528 "permissions on, superuser request",
532 // ----------------------------
533 // BlobSigning disabled
534 // Valid Request should still pass.
535 s.cluster.Collections.BlobSigning = false
537 // superuser /index request
539 response = IssueRequest(s.handler, superuserReq)
541 "permissions on, superuser request",
// The index lists both data blocks (hash+size mtime per line) but not
// the ".meta" files, and ends with a blank terminator line.
545 expected := `^` + TestHash + `\+\d+ \d+\n` +
546 TestHash2 + `\+\d+ \d+\n\n$`
547 c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
548 "permissions on, superuser request"))
550 // superuser /index/prefix request
552 response = IssueRequest(s.handler, superuserPrefixReq)
554 "permissions on, superuser request",
558 expected = `^` + TestHash + `\+\d+ \d+\n\n$`
559 c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
560 "permissions on, superuser /index/prefix request"))
562 // superuser /index/{no-such-prefix} request
564 response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
566 "permissions on, superuser request",
// An empty index is just the terminator line.
570 if "\n" != response.Body.String() {
571 c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
574 // superuser /index/{invalid-prefix} request
575 // => StatusBadRequest
576 response = IssueRequest(s.handler, superuserInvalidPrefixReq)
578 "permissions on, superuser request",
579 http.StatusBadRequest,
587 // With no token and with a non-data-manager token:
588 // * Delete existing block
589 // (test for 403 Forbidden, confirm block not deleted)
591 // With data manager token:
593 // * Delete existing block
594 // (test for 200 OK, response counts, confirm block deleted)
596 // * Delete nonexistent block
597 // (test for 200 OK, response counts)
601 // * Delete block on read-only and read-write volume
602 // (test for 200 OK, response with copies_deleted=1,
603 // copies_failed=1, confirm block deleted only on r/w volume)
605 // * Delete block on read-only volume only
606 // (test for 200 OK, response with copies_deleted=0, copies_failed=1,
607 // confirm block not deleted)
609 func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
610 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
612 vols := s.handler.volmgr.AllWritable()
613 vols[0].Put(context.Background(), TestHash, TestBlock)
615 // Explicitly set the BlobSigningTTL to 0 for these
616 // tests, to ensure the MockVolume deletes the blocks
617 // even though they have just been created.
618 s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
620 var userToken = "NOT DATA MANAGER TOKEN"
621 s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
623 s.cluster.Collections.BlobTrash = true
625 unauthReq := &RequestTester{
630 userReq := &RequestTester{
636 superuserExistingBlockReq := &RequestTester{
639 apiToken: s.cluster.SystemRootToken,
642 superuserNonexistentBlockReq := &RequestTester{
644 uri: "/" + TestHash2,
645 apiToken: s.cluster.SystemRootToken,
648 // Unauthenticated request returns PermissionError.
649 var response *httptest.ResponseRecorder
650 response = IssueRequest(s.handler, unauthReq)
652 "unauthenticated request",
653 PermissionError.HTTPCode,
656 // Authenticated non-admin request returns PermissionError.
657 response = IssueRequest(s.handler, userReq)
659 "authenticated non-admin request",
660 PermissionError.HTTPCode,
663 // Authenticated admin request for nonexistent block.
// DELETE responses are JSON bodies of this shape.
664 type deletecounter struct {
665 Deleted int `json:"copies_deleted"`
666 Failed int `json:"copies_failed"`
668 var responseDc, expectedDc deletecounter
670 response = IssueRequest(s.handler, superuserNonexistentBlockReq)
672 "data manager request, nonexistent block",
676 // Authenticated admin request for existing block while BlobTrash is false.
677 s.cluster.Collections.BlobTrash = false
678 response = IssueRequest(s.handler, superuserExistingBlockReq)
680 "authenticated request, existing block, method disabled",
681 MethodDisabledError.HTTPCode,
683 s.cluster.Collections.BlobTrash = true
685 // Authenticated admin request for existing block.
686 response = IssueRequest(s.handler, superuserExistingBlockReq)
688 "data manager request, existing block",
691 // Expect response {"copies_deleted":1,"copies_failed":0}
692 expectedDc = deletecounter{1, 0}
693 json.NewDecoder(response.Body).Decode(&responseDc)
694 if responseDc != expectedDc {
695 c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
696 expectedDc, responseDc)
698 // Confirm the block has been deleted
// Reading the block back should now fail with a does-not-exist error.
699 buf := make([]byte, BlockSize)
700 _, err := vols[0].Get(context.Background(), TestHash, buf)
701 var blockDeleted = os.IsNotExist(err)
703 c.Error("superuserExistingBlockReq: block not deleted")
706 // A DELETE request on a block newer than BlobSigningTTL
707 // should return success but leave the block on the volume.
708 vols[0].Put(context.Background(), TestHash, TestBlock)
709 s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
711 response = IssueRequest(s.handler, superuserExistingBlockReq)
713 "data manager request, existing block",
716 // Expect response {"copies_deleted":1,"copies_failed":0}
717 expectedDc = deletecounter{1, 0}
718 json.NewDecoder(response.Body).Decode(&responseDc)
719 if responseDc != expectedDc {
720 c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
721 expectedDc, responseDc)
723 // Confirm the block has NOT been deleted.
724 _, err = vols[0].Get(context.Background(), TestHash, buf)
726 c.Errorf("testing delete on new block: %s\n", err)
732 // Test handling of the PUT /pull statement.
734 // Cases tested: syntactically valid and invalid pull lists, from the
735 // data manager and from unprivileged users:
737 // 1. Valid pull list from an ordinary user
738 // (expected result: 401 Unauthorized)
740 // 2. Invalid pull request from an ordinary user
741 // (expected result: 401 Unauthorized)
743 // 3. Valid pull request from the data manager
744 // (expected result: 200 OK with request body "Received 3 pull
747 // 4. Invalid pull request from the data manager
748 // (expected result: 400 Bad Request)
750 // Test that in the end, the pull manager received a good pull list with
751 // the expected number of requests.
753 // TODO(twp): test concurrency: launch 100 goroutines to update the
754 // pull list simultaneously. Make sure that none of them return 400
755 // Bad Request and that pullq.GetList() returns a valid list.
757 func (s *HandlerSuite) TestPullHandler(c *check.C) {
758 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
760 // Replace the router's pullq -- which the worker goroutines
761 // started by setup() are now receiving from -- with a new
762 // one, so we can see what the handler sends to it.
763 pullq := NewWorkQueue()
764 s.handler.Handler.(*router).pullq = pullq
766 var userToken = "USER TOKEN"
767 s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
// Three-entry pull list; only the third entry's servers field is
// visible in this excerpt.
769 goodJSON := []byte(`[
771 "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
778 "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
782 "locator":"cccccccccccccccccccccccccccccccc+12345",
783 "servers":["http://server1"]
787 badJSON := []byte(`{ "key":"I'm a little teapot" }`)
789 type pullTest struct {
795 var testcases = []pullTest{
797 "Valid pull list from an ordinary user",
798 RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
799 http.StatusUnauthorized,
803 "Invalid pull request from an ordinary user",
804 RequestTester{"/pull", userToken, "PUT", badJSON, ""},
805 http.StatusUnauthorized,
809 "Valid pull request from the data manager",
810 RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
812 "Received 3 pull requests\n",
815 "Invalid pull request from the data manager",
816 RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
817 http.StatusBadRequest,
822 for _, tst := range testcases {
823 response := IssueRequest(s.handler, &tst.req)
824 ExpectStatusCode(c, tst.name, tst.responseCode, response)
825 ExpectBody(c, tst.name, tst.responseBody, response)
828 // The Keep pull manager should have received one good list with 3
// Drain exactly three items from the queue, with a 1s timeout each.
830 for i := 0; i < 3; i++ {
833 case item = <-pullq.NextItem:
834 case <-time.After(time.Second):
837 if _, ok := item.(PullRequest); !ok {
838 c.Errorf("item %v could not be parsed as a PullRequest", item)
842 expectChannelEmpty(c, pullq.NextItem)
849 // Cases tested: syntactically valid and invalid trash lists, from the
850 // data manager and from unprivileged users:
852 // 1. Valid trash list from an ordinary user
853 // (expected result: 401 Unauthorized)
855 // 2. Invalid trash list from an ordinary user
856 // (expected result: 401 Unauthorized)
858 // 3. Valid trash list from the data manager
859 // (expected result: 200 OK with request body "Received 3 trash
862 // 4. Invalid trash list from the data manager
863 // (expected result: 400 Bad Request)
865 // Test that in the end, the trash collector received a good list
866 // trash list with the expected number of requests.
868 // TODO(twp): test concurrency: launch 100 goroutines to update the
869 // pull list simultaneously. Make sure that none of them return 400
870 // Bad Request and that replica.Dump() returns a valid list.
872 func (s *HandlerSuite) TestTrashHandler(c *check.C) {
873 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
874 // Replace the router's trashq -- which the worker goroutines
875 // started by setup() are now receiving from -- with a new
876 // one, so we can see what the handler sends to it.
877 trashq := NewWorkQueue()
878 s.handler.Handler.(*router).trashq = trashq
880 var userToken = "USER TOKEN"
881 s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
// Three-entry trash list (locators elided in this excerpt).
883 goodJSON := []byte(`[
886 "block_mtime":1409082153
890 "block_mtime":1409082153
894 "block_mtime":1409082153
898 badJSON := []byte(`I am not a valid JSON string`)
900 type trashTest struct {
907 var testcases = []trashTest{
909 "Valid trash list from an ordinary user",
910 RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
911 http.StatusUnauthorized,
915 "Invalid trash list from an ordinary user",
916 RequestTester{"/trash", userToken, "PUT", badJSON, ""},
917 http.StatusUnauthorized,
921 "Valid trash list from the data manager",
922 RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
924 "Received 3 trash requests\n",
927 "Invalid trash list from the data manager",
928 RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
929 http.StatusBadRequest,
934 for _, tst := range testcases {
935 response := IssueRequest(s.handler, &tst.req)
936 ExpectStatusCode(c, tst.name, tst.responseCode, response)
937 ExpectBody(c, tst.name, tst.responseBody, response)
940 // The trash collector should have received one good list with 3
942 for i := 0; i < 3; i++ {
943 item := <-trashq.NextItem
944 if _, ok := item.(TrashRequest); !ok {
945 c.Errorf("item %v could not be parsed as a TrashRequest", item)
949 expectChannelEmpty(c, trashq.NextItem)
952 // ====================
954 // ====================
956 // IssueTestRequest executes an HTTP request described by rt, to a
957 // REST router. It returns the HTTP response to the request.
// Note: rt.apiToken (if any) is sent with the "OAuth2" authorization
// scheme; rt.storageClasses (if any) goes in X-Keep-Storage-Classes.
958 func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
959 response := httptest.NewRecorder()
960 body := bytes.NewReader(rt.requestBody)
961 req, _ := http.NewRequest(rt.method, rt.uri, body)
962 if rt.apiToken != "" {
963 req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
965 if rt.storageClasses != "" {
966 req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
968 handler.ServeHTTP(response, req)
// IssueHealthCheckRequest is like IssueRequest, but sends the token
// with the "Bearer" scheme (as health-check clients do) and never
// sets storage-class headers.
972 func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
973 response := httptest.NewRecorder()
974 body := bytes.NewReader(rt.requestBody)
975 req, _ := http.NewRequest(rt.method, rt.uri, body)
976 if rt.apiToken != "" {
977 req.Header.Set("Authorization", "Bearer "+rt.apiToken)
979 handler.ServeHTTP(response, req)
983 // ExpectStatusCode checks whether a response has the specified status code,
984 // and reports a test failure if not.
// The testname is attached via check.Commentf so the failing case is
// identifiable in the test report.
985 func ExpectStatusCode(
989 response *httptest.ResponseRecorder) {
990 c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
// (Tail of ExpectBody — see callers such as TestPullHandler.) Reports
// a failure unless the response body equals expectedBody; an empty
// expectedBody disables the comparison.
997 response *httptest.ResponseRecorder) {
998 if expectedBody != "" && response.Body.String() != expectedBody {
999 c.Errorf("%s: expected response body '%s', got %+v",
1000 testname, expectedBody, response)
// TestPutNeedsOnlyOneBuffer: with the buffer pool shrunk to a single
// buffer, two PUTs in sequence must both succeed — i.e. a PUT never
// holds more than one buffer at a time. A pool leak/deadlock trips
// the 1-second timeout below.
1005 func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
1006 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// Swap in a one-buffer pool; the deferred func restores the original.
1008 defer func(orig *bufferPool) {
1011 bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
1013 ok := make(chan struct{})
1015 for i := 0; i < 2; i++ {
1016 response := IssueRequest(s.handler,
1019 uri: "/" + TestHash,
1020 requestBody: TestBlock,
1023 "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
1030 case <-time.After(time.Second):
1031 c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
1035 // Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
// leaks: issuing MaxKeepBlobBuffers+1 PUTs can only complete if every
// PUT returns its buffer to the pool.
1037 func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
1038 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1040 ok := make(chan bool)
1042 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
1043 // Unauthenticated request, no server key
1044 // => OK (unsigned response)
1045 unsignedLocator := "/" + TestHash
1046 response := IssueRequest(s.handler,
1049 uri: unsignedLocator,
1050 requestBody: TestBlock,
1053 "TestPutHandlerBufferleak", http.StatusOK, response)
1055 "TestPutHandlerBufferleak",
1056 TestHashPutResp, response)
1061 case <-time.After(20 * time.Second):
1062 // If the buffer pool leaks, the test goroutine hangs.
1063 c.Fatal("test did not finish, assuming pool leaked")
// notifyingResponseRecorder wraps httptest.ResponseRecorder so the
// handler under test can observe a simulated client disconnect.
1068 type notifyingResponseRecorder struct {
1069 *httptest.ResponseRecorder
// CloseNotify implements http.CloseNotifier; the returned channel is
// fed by the test (see TestGetHandlerClientDisconnect, which asserts
// the interface is satisfied).
1073 func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
1077 func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
1078 s.cluster.Collections.BlobSigning = false
1079 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1081 defer func(orig *bufferPool) {
1084 bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
1085 defer bufs.Put(bufs.Get(BlockSize))
1087 if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
1091 resp := ¬ifyingResponseRecorder{
1092 ResponseRecorder: httptest.NewRecorder(),
1093 closer: make(chan bool, 1),
1095 if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
1096 c.Fatal("notifyingResponseRecorder is broken")
1098 // If anyone asks, the client has disconnected.
1101 ok := make(chan struct{})
1103 req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
1104 s.handler.ServeHTTP(resp, req)
1109 case <-time.After(20 * time.Second):
1110 c.Fatal("request took >20s, close notifier must be broken")
1114 ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
1115 for i, v := range s.handler.volmgr.AllWritable() {
1116 if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
1117 c.Errorf("volume %d got %d calls, expected 0", i, calls)
1122 // Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
// leaks: MaxKeepBlobBuffers+1 sequential GETs can only complete if
// every GET returns its buffer to the pool.
1124 func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
1125 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1127 vols := s.handler.volmgr.AllWritable()
1128 if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
1132 ok := make(chan bool)
1134 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
1135 // Unauthenticated request, unsigned locator
1137 unsignedLocator := "/" + TestHash
1138 response := IssueRequest(s.handler,
1141 uri: unsignedLocator,
1144 "Unauthenticated request, unsigned locator", http.StatusOK, response)
1146 "Unauthenticated request, unsigned locator",
1153 case <-time.After(20 * time.Second):
1154 // If the buffer pool leaks, the test goroutine hangs.
1155 c.Fatal("test did not finish, assuming pool leaked")
// TestPutStorageClasses: the X-Keep-Storage-Classes-Confirmed header
// must report which requested classes were satisfied, and a request
// satisfiable only by nonexistent/read-only classes must fail.
1160 func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
1161 s.cluster.Volumes = map[string]arvados.Volume{
1162 "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
1163 "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
1164 "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
1166 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1167 rt := RequestTester{
1169 uri: "/" + TestHash,
1170 requestBody: TestBlock,
// Success cases: ask => expected confirmed-classes header (order
// normalized via sortCommaSeparated before comparison).
1173 for _, trial := range []struct {
1178 {"default", "default=1"},
1179 {" , default , default , ", "default=1"},
1180 {"special", "extra=1, special=1"},
1181 {"special, readonly", "extra=1, special=1"},
1182 {"special, nonexistent", "extra=1, special=1"},
1183 {"extra, special", "extra=1, special=1"},
1184 {"default, special", "default=1, extra=1, special=1"},
1186 c.Logf("success case %#v", trial)
1187 rt.storageClasses = trial.ask
1188 resp := IssueRequest(s.handler, &rt)
1189 if trial.expect == "" {
1190 // any non-empty value is correct
1191 c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
1193 c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
// Failure cases: no writable volume carries the requested classes,
// so the PUT must return 503.
1197 for _, trial := range []struct {
1201 {"doesnotexist, readonly"},
1204 c.Logf("failure case %#v", trial)
1205 rt.storageClasses = trial.ask
1206 resp := IssueRequest(s.handler, &rt)
1207 c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
// sortCommaSeparated splits s on ", ", sorts the elements
// lexicographically, and joins them back with ", ". Tests use it to
// compare X-Keep-Storage-Classes-Confirmed header values without
// depending on the order the handler lists the classes in.
//
// The visible excerpt was missing the sort step (elided source line);
// restored here — without it the function would not match its name or
// the sorted expected values in TestPutStorageClasses.
func sortCommaSeparated(s string) string {
	slice := strings.Split(s, ", ")
	sort.Strings(slice)
	return strings.Join(slice, ", ")
}
// Verifies that a successful PUT reports the replica count and the
// confirmed storage classes in its response headers.
1217 func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
1218 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1220 resp := IssueRequest(s.handler, &RequestTester{
1222 uri: "/" + TestHash,
1223 requestBody: TestBlock,
// Exactly one replica is reported, in the implicit "default" class.
1226 c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
1227 c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
// Exercises permission and input validation on the /untrash/{hash}
// endpoint: only the system root token may untrash, the locator must
// be well-formed, and only the PUT method is accepted.
1230 func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
1231 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1233 // Set up Keep volumes
1234 vols := s.handler.volmgr.AllWritable()
1235 vols[0].Put(context.Background(), TestHash, TestBlock)
1237 s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
1239 // unauthenticatedReq => UnauthorizedError
1240 unauthenticatedReq := &RequestTester{
1242 uri: "/untrash/" + TestHash,
1244 response := IssueRequest(s.handler, unauthenticatedReq)
1246 "Unauthenticated request",
1247 UnauthorizedError.HTTPCode,
1250 // notDataManagerReq => UnauthorizedError
1251 notDataManagerReq := &RequestTester{
1253 uri: "/untrash/" + TestHash,
1254 apiToken: knownToken,
1257 response = IssueRequest(s.handler, notDataManagerReq)
1259 "Non-datamanager token",
1260 UnauthorizedError.HTTPCode,
1263 // datamanagerWithBadHashReq => StatusBadRequest
1264 datamanagerWithBadHashReq := &RequestTester{
1266 uri: "/untrash/thisisnotalocator",
1267 apiToken: s.cluster.SystemRootToken,
1269 response = IssueRequest(s.handler, datamanagerWithBadHashReq)
1271 "Bad locator in untrash request",
1272 http.StatusBadRequest,
// datamanagerWrongMethodReq => StatusMethodNotAllowed
// (comment fixed: the assertion below expects 405, not 400)
1276 datamanagerWrongMethodReq := &RequestTester{
1278 uri: "/untrash/" + TestHash,
1279 apiToken: s.cluster.SystemRootToken,
1281 response = IssueRequest(s.handler, datamanagerWrongMethodReq)
1283 "Only PUT method is supported for untrash",
1284 http.StatusMethodNotAllowed,
1287 // datamanagerReq => StatusOK
1288 datamanagerReq := &RequestTester{
1290 uri: "/untrash/" + TestHash,
1291 apiToken: s.cluster.SystemRootToken,
1293 response = IssueRequest(s.handler, datamanagerReq)
// Both mock volumes report a successful untrash.
1298 c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
// An untrash request must fail with 404 when every volume is
// read-only, since nothing can be untrashed anywhere.
1301 func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
1302 // Change all volumes to read-only
1303 for uuid, v := range s.cluster.Volumes {
// NOTE(review): an elided source line here presumably sets
// v.ReadOnly = true before reassigning — confirm against the
// original file.
1305 s.cluster.Volumes[uuid] = v
1307 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
// datamanagerReq => StatusNotFound
// (comment fixed: the assertion below expects 404, not 200)
1310 datamanagerReq := &RequestTester{
1312 uri: "/untrash/" + TestHash,
1313 apiToken: s.cluster.SystemRootToken,
1315 response := IssueRequest(s.handler, datamanagerReq)
1317 "No writable volumes",
1318 http.StatusNotFound,
1322 func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
1323 s.cluster.ManagementToken = arvadostest.ManagementToken
1324 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1325 pingReq := &RequestTester{
1327 uri: "/_health/ping",
1328 apiToken: arvadostest.ManagementToken,
1330 response := IssueHealthCheckRequest(s.handler, pingReq)
1335 want := `{"health":"OK"}`
1336 if !strings.Contains(response.Body.String(), want) {
1337 c.Errorf("expected response to include %s: got %s", want, response.Body.String())