// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Tests for the Keep HTTP handlers.
//
// The HTTP handlers are responsible for enforcing permission policy,
// so these tests must exercise all possible permission permutations.
29 "git.arvados.org/arvados.git/lib/config"
30 "git.arvados.org/arvados.git/sdk/go/arvados"
31 "git.arvados.org/arvados.git/sdk/go/arvadostest"
32 "git.arvados.org/arvados.git/sdk/go/ctxlog"
33 "github.com/prometheus/client_golang/prometheus"
34 check "gopkg.in/check.v1"

var testServiceURL = func() arvados.URL {
	return arvados.URL{Host: "localhost:12345", Scheme: "http"}
}()

func testCluster(t TB) *arvados.Cluster {
	cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
	if err != nil {
		t.Fatal(err)
	}
	cluster, err := cfg.GetCluster("")
	if err != nil {
		t.Fatal(err)
	}
	cluster.SystemRootToken = arvadostest.SystemRootToken
	cluster.ManagementToken = arvadostest.ManagementToken
	cluster.Collections.BlobSigning = false
	return cluster
}

var _ = check.Suite(&HandlerSuite{})

type HandlerSuite struct {
	cluster *arvados.Cluster
	handler *handler
}

func (s *HandlerSuite) SetUpTest(c *check.C) {
	s.cluster = testCluster(c)
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
	}
	s.handler = &handler{}
}
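
// Both volumes use the "mock" driver, so each mount wraps a *MockVolume
// whose CallCount() method and error-injection fields (Bad,
// BadVolumeError) let the tests below observe and perturb backend
// behavior.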

// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
type RequestTester struct {
	uri            string
	apiToken       string
	method         string
	requestBody    []byte
	storageClasses string
}
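
// A typical use, as a sketch (see IssueRequest below, which turns a
// RequestTester into an *httptest.ResponseRecorder):
//
//	response := IssueRequest(s.handler, &RequestTester{
//		method:   "GET",
//		uri:      "/" + TestHash,
//		apiToken: knownToken,
//	})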

// Test GetBlockHandler on the following situations:
//   - permissions off, unauthenticated request, unsigned locator
//   - permissions on, authenticated request, signed locator
//   - permissions on, authenticated request, unsigned locator
//   - permissions on, unauthenticated request, signed locator
//   - permissions on, authenticated request, expired locator
//   - permissions on, authenticated request, signed locator, transient error from backend
func (s *HandlerSuite) TestGetHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	err := vols[0].Put(context.Background(), TestHash, TestBlock)
	c.Check(err, check.IsNil)

	// Create locators for testing.
	// Turn on permission settings so we can generate signed locators.
	s.cluster.Collections.BlobSigning = true
	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	var (
		unsignedLocator  = "/" + TestHash
		validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
		expiredTimestamp = time.Now().Add(-time.Hour)
		signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
		expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
	)
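
	// A signed locator appends permission hints to the plain locator,
	// along the lines of (illustrative only, not an actual signature):
	//
	//	<md5 hash>+<size>+A<hex signature>@<hex expiry timestamp>
	//
	// SignLocator produces this form and VerifySignature checks it; the
	// tests below only rely on the two agreeing.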

	// -----------------
	// Test unauthenticated request with permissions off.
	s.cluster.Collections.BlobSigning = false

	// Unauthenticated request, unsigned locator
	// => OK
	response := IssueRequest(s.handler,
		&RequestTester{
			method: "GET",
			uri:    unsignedLocator,
		})
	ExpectStatusCode(c,
		"Unauthenticated request, unsigned locator", http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated request, unsigned locator",
		string(TestBlock), response)

	receivedLen := response.Header().Get("Content-Length")
	expectedLen := fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
	}

	// -----------------
	// Permissions: on.
	s.cluster.Collections.BlobSigning = true

	// Authenticated request, signed locator
	// => OK
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      signedLocator,
		apiToken: knownToken,
	})
	ExpectStatusCode(c,
		"Authenticated request, signed locator", http.StatusOK, response)
	ExpectBody(c,
		"Authenticated request, signed locator", string(TestBlock), response)

	receivedLen = response.Header().Get("Content-Length")
	expectedLen = fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
	}

	// Authenticated request, unsigned locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      unsignedLocator,
		apiToken: knownToken,
	})
	ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)

	// Unauthenticated request, signed locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		method: "GET",
		uri:    signedLocator,
	})
	ExpectStatusCode(c,
		"Unauthenticated request, signed locator",
		PermissionError.HTTPCode, response)

	// Authenticated request, expired locator
	// => ExpiredError
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      expiredLocator,
		apiToken: knownToken,
	})
	ExpectStatusCode(c,
		"Authenticated request, expired locator",
		ExpiredError.HTTPCode, response)

	// Authenticated request, signed locator
	// => 503 Server busy (transient error)

	// Set up the block-owning volume to respond with errors.
	vols[0].Volume.(*MockVolume).Bad = true
	vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      signedLocator,
		apiToken: knownToken,
	})
	// A transient error from one volume while the other doesn't find the
	// block should make the service return a 503 so that clients can retry.
	ExpectStatusCode(c,
		"Volume backend busy",
		503, response)
}

// Test PutBlockHandler on the following situations:
//   - no server key
//   - with server key, authenticated request, unsigned locator
//   - with server key, unauthenticated request, unsigned locator
func (s *HandlerSuite) TestPutHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// --------------
	// No server key.

	s.cluster.Collections.BlobSigningKey = ""

	// Unauthenticated request, no server key
	// => OK (unsigned response)
	unsignedLocator := "/" + TestHash
	response := IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated request, no server key", http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated request, no server key",
		TestHashPutResp, response)

	// ------------------
	// With a server key.

	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	// When a permission key is available, the locator returned
	// from an authenticated PUT request will be signed.

	// Authenticated PUT, signed locator
	// => OK (signed response)
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
			apiToken:    knownToken,
		})

	ExpectStatusCode(c,
		"Authenticated PUT, signed locator, with server key",
		http.StatusOK, response)
	responseLocator := strings.TrimSpace(response.Body.String())
	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
			"response '%s' does not contain a valid signature",
			responseLocator)
	}

	// Unauthenticated PUT, unsigned locator
	// => OK
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		TestHashPutResp, response)
}

func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	s.cluster.SystemRootToken = "fake-data-manager-token"
	IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
		})

	s.cluster.Collections.BlobTrash = true
	IssueRequest(s.handler,
		&RequestTester{
			method:      "DELETE",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
			apiToken:    s.cluster.SystemRootToken,
		})

	type expect struct {
		volid     string
		method    string
		callcount int
	}
	for _, e := range []expect{
		{"zzzzz-nyw5e-000000000000000", "Get", 0},
		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
		{"zzzzz-nyw5e-000000000000000", "Put", 0},
		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
		{"zzzzz-nyw5e-111111111111111", "Get", 0},
		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
		{"zzzzz-nyw5e-111111111111111", "Put", 1},
		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
	} {
		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
		}
	}
}

func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-111111111111111": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class1": true}},
		"zzzzz-nyw5e-222222222222222": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class2": true, "class3": true}},
	}

	for _, trial := range []struct {
		priority1 int // priority of class1, thus vol1
		priority2 int // priority of class2
		priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
		get1      int // expected number of "get" ops on vol1
		get2      int // expected number of "get" ops on vol2
	}{
		{100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
		{100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
		{66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
		{66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
	} {
		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
			"class1": {Priority: trial.priority1},
			"class2": {Priority: trial.priority2},
			"class3": {Priority: trial.priority3},
		}
		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
		IssueRequest(s.handler,
			&RequestTester{
				method:         "PUT",
				uri:            "/" + TestHash,
				requestBody:    TestBlock,
				storageClasses: "class1",
			})
		IssueRequest(s.handler,
			&RequestTester{
				method: "GET",
				uri:    "/" + TestHash,
			})
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
	}
}

func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-111111111111111": {
			Driver:         "mock",
			Replication:    1,
			ReadOnly:       true,
			StorageClasses: map[string]bool{"class1": true}},
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	resp := IssueRequest(s.handler,
		&RequestTester{
			method:         "PUT",
			uri:            "/" + TestHash,
			requestBody:    TestBlock,
			storageClasses: "class1",
		})
	c.Check(resp.Code, check.Equals, FullError.HTTPCode)
	c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
}

func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-111111111111111": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class1": true}},
		"zzzzz-nyw5e-121212121212121": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class1": true, "class2": true}},
		"zzzzz-nyw5e-222222222222222": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class2": true}},
	}

	for _, trial := range []struct {
		setCounter uint32 // value to stuff vm.counter, to control offset
		classes    string // desired classes
		put111     int    // expected number of "put" ops on 11111... after 2x put reqs
		put121     int    // expected number of "put" ops on 12121...
		put222     int    // expected number of "put" ops on 22222...
		cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
		cmp121     int    // expected number of "compare" ops on 12121...
		cmp222     int    // expected number of "compare" ops on 22222...
	}{
		{0, "class1",
			1, 0, 0,
			2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 111
		{0, "class2",
			0, 1, 0,
			0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
		{0, "class1,class2",
			1, 1, 0,
			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
		{1, "class1,class2",
			0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
		{0, "class1,class2,class404",
			1, 1, 0,
			2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
	} {
		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
			"class1": {},
			"class2": {},
		}
		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
		atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
		for i := 0; i < 2; i++ {
			IssueRequest(s.handler,
				&RequestTester{
					method:         "PUT",
					uri:            "/" + TestHash,
					requestBody:    TestBlock,
					storageClasses: trial.classes,
				})
		}
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
	}
}

// Test TOUCH requests.
func (s *HandlerSuite) TestTouchHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
	afterPut := time.Now()
	t, err := vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.Before(afterPut), check.Equals, true)

	ExpectStatusCode(c,
		"touch with no credentials",
		http.StatusUnauthorized,
		IssueRequest(s.handler, &RequestTester{
			method: "TOUCH",
			uri:    "/" + TestHash,
		}))

	ExpectStatusCode(c,
		"touch with non-root credentials",
		http.StatusUnauthorized,
		IssueRequest(s.handler, &RequestTester{
			method:   "TOUCH",
			uri:      "/" + TestHash,
			apiToken: arvadostest.ActiveTokenV2,
		}))

	ExpectStatusCode(c,
		"touch non-existent block",
		http.StatusNotFound,
		IssueRequest(s.handler, &RequestTester{
			method:   "TOUCH",
			uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
			apiToken: s.cluster.SystemRootToken,
		}))

	beforeTouch := time.Now()
	ExpectStatusCode(c,
		"touch",
		http.StatusOK,
		IssueRequest(s.handler, &RequestTester{
			method:   "TOUCH",
			uri:      "/" + TestHash,
			apiToken: s.cluster.SystemRootToken,
		}))
	t, err = vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.After(beforeTouch), check.Equals, true)
}

// Test /index requests:
//   - unauthenticated /index request
//   - unauthenticated /index/prefix request
//   - authenticated /index request | non-superuser
//   - authenticated /index/prefix request | non-superuser
//   - authenticated /index request | superuser
//   - authenticated /index/prefix request | superuser
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Include multiple blocks on different volumes, and
	// some metadata files (which should be omitted from index listings)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[1].Put(context.Background(), TestHash2, TestBlock2)
	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
	vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	unauthenticatedReq := &RequestTester{
		method: "GET",
		uri:    "/index",
	}
	authenticatedReq := &RequestTester{
		method:   "GET",
		uri:      "/index",
		apiToken: knownToken,
	}
	superuserReq := &RequestTester{
		method:   "GET",
		uri:      "/index",
		apiToken: s.cluster.SystemRootToken,
	}
	unauthPrefixReq := &RequestTester{
		method: "GET",
		uri:    "/index/" + TestHash[0:3],
	}
	authPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/" + TestHash[0:3],
		apiToken: knownToken,
	}
	superuserPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/" + TestHash[0:3],
		apiToken: s.cluster.SystemRootToken,
	}
	superuserNoSuchPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/abcd",
		apiToken: s.cluster.SystemRootToken,
	}
	superuserInvalidPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/xyz",
		apiToken: s.cluster.SystemRootToken,
	}

	// -------------------------------------------------------------
	// Only the superuser should be allowed to issue /index requests.

	// ---------------------------
	// BlobSigning enabled
	// This setting should not affect the test results.
	s.cluster.Collections.BlobSigning = true

	// unauthenticated /index request
	// => UnauthorizedError
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"permissions on, unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// unauthenticated /index/prefix request
	// => UnauthorizedError
	response = IssueRequest(s.handler, unauthPrefixReq)
	ExpectStatusCode(c,
		"permissions on, unauthenticated /index/prefix request",
		UnauthorizedError.HTTPCode,
		response)

	// authenticated /index request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authenticatedReq)
	ExpectStatusCode(c,
		"permissions on, authenticated request, non-superuser",
		UnauthorizedError.HTTPCode,
		response)

	// authenticated /index/prefix request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authPrefixReq)
	ExpectStatusCode(c,
		"permissions on, authenticated /index/prefix request, non-superuser",
		UnauthorizedError.HTTPCode,
		response)

	// superuser /index request
	// => OK
	response = IssueRequest(s.handler, superuserReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	// ----------------------------
	// BlobSigning disabled
	// A valid request should still pass.
	s.cluster.Collections.BlobSigning = false

	// superuser /index request
	// => OK
	response = IssueRequest(s.handler, superuserReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)
	expected := `^` + TestHash + `\+\d+ \d+\n` +
		TestHash2 + `\+\d+ \d+\n\n$`
	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
		"permissions on, superuser request"))

	// superuser /index/prefix request
	// => OK
	response = IssueRequest(s.handler, superuserPrefixReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
		"permissions on, superuser /index/prefix request"))

	// superuser /index/{no-such-prefix} request
	// => OK
	response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	if response.Body.String() != "\n" {
		c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
	}

	// superuser /index/{invalid-prefix} request
	// => StatusBadRequest
	response = IssueRequest(s.handler, superuserInvalidPrefixReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusBadRequest,
		response)
}

// Test DELETE /{hash} requests.
//
// Cases tested:
//
// With no token and with a non-data-manager token:
// * Delete existing block
//   (test for 403 Forbidden, confirm block not deleted)
//
// With data manager token:
//
// * Delete existing block
//   (test for 200 OK, response counts, confirm block deleted)
//
// * Delete nonexistent block
//   (test for 200 OK, response counts)
//
// TODO(twp):
//
// * Delete block on read-only and read-write volume
//   (test for 200 OK, response with copies_deleted=1,
//   copies_failed=1, confirm block deleted only on r/w volume)
//
// * Delete block on read-only volume only
//   (test for 200 OK, response with copies_deleted=0, copies_failed=1,
//   confirm block not deleted)
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	// Explicitly set the BlobSigningTTL to 0 for these
	// tests, to ensure the MockVolume deletes the blocks
	// even though they have just been created.
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)

	var userToken = "NOT DATA MANAGER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	s.cluster.Collections.BlobTrash = true

	unauthReq := &RequestTester{
		method: "DELETE",
		uri:    "/" + TestHash,
	}

	userReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash,
		apiToken: userToken,
	}

	superuserExistingBlockReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}

	superuserNonexistentBlockReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash2,
		apiToken: s.cluster.SystemRootToken,
	}

	// Unauthenticated request returns PermissionError.
	var response *httptest.ResponseRecorder
	response = IssueRequest(s.handler, unauthReq)
	ExpectStatusCode(c,
		"unauthenticated request",
		PermissionError.HTTPCode,
		response)

	// Authenticated non-admin request returns PermissionError.
	response = IssueRequest(s.handler, userReq)
	ExpectStatusCode(c,
		"authenticated non-admin request",
		PermissionError.HTTPCode,
		response)

	// Authenticated admin request for nonexistent block.
	type deletecounter struct {
		Deleted int `json:"copies_deleted"`
		Failed  int `json:"copies_failed"`
	}
	var responseDc, expectedDc deletecounter

	response = IssueRequest(s.handler, superuserNonexistentBlockReq)
	ExpectStatusCode(c,
		"data manager request, nonexistent block",
		http.StatusNotFound,
		response)

	// Authenticated admin request for existing block while BlobTrash is false.
	s.cluster.Collections.BlobTrash = false
	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"authenticated request, existing block, method disabled",
		MethodDisabledError.HTTPCode,
		response)
	s.cluster.Collections.BlobTrash = true

	// Authenticated admin request for existing block.
	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"data manager request, existing block",
		http.StatusOK,
		response)
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	}
	// Confirm the block has been deleted
	buf := make([]byte, BlockSize)
	_, err := vols[0].Get(context.Background(), TestHash, buf)
	var blockDeleted = os.IsNotExist(err)
	if !blockDeleted {
		c.Error("superuserExistingBlockReq: block not deleted")
	}

	// A DELETE request on a block newer than BlobSigningTTL
	// should return success but leave the block on the volume.
	vols[0].Put(context.Background(), TestHash, TestBlock)
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)

	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"data manager request, existing block",
		http.StatusOK,
		response)
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	}
	// Confirm the block has NOT been deleted.
	_, err = vols[0].Get(context.Background(), TestHash, buf)
	if err != nil {
		c.Errorf("testing delete on new block: %s\n", err)
	}
}

// Test handling of the PUT /pull request.
//
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
// 1. Valid pull list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 2. Invalid pull request from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 3. Valid pull request from the data manager
//    (expected result: 200 OK with request body "Received 3 pull
//    requests\n")
//
// 4. Invalid pull request from the data manager
//    (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
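//
// A minimal sketch of what that concurrency test could look like
// (hypothetical, not yet implemented; reuses goodJSON and the helpers
// defined below, and would need a "sync" import):
//
//	var wg sync.WaitGroup
//	for i := 0; i < 100; i++ {
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			resp := IssueRequest(s.handler, &RequestTester{
//				"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""})
//			c.Check(resp.Code, check.Equals, http.StatusOK)
//		}()
//	}
//	wg.Wait()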
func (s *HandlerSuite) TestPullHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Replace the router's pullq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	pullq := NewWorkQueue()
	s.handler.Handler.(*router).pullq = pullq

	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	goodJSON := []byte(`[
		{
			"locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
			"servers":[
				"http://server1",
				"http://server2"
			]
		},
		{
			"locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
			"servers":[]
		},
		{
			"locator":"cccccccccccccccccccccccccccccccc+12345",
			"servers":["http://server1"]
		}
	]`)

	badJSON := []byte(`{ "key":"I'm a little teapot" }`)

	type pullTest struct {
		name         string
		req          RequestTester
		responseCode int
		responseBody string
	}
	var testcases = []pullTest{
		{"Valid pull list from an ordinary user",
			RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
			http.StatusUnauthorized, ""},
		{"Invalid pull request from an ordinary user",
			RequestTester{"/pull", userToken, "PUT", badJSON, ""},
			http.StatusUnauthorized, ""},
		{"Valid pull request from the data manager",
			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
			http.StatusOK, "Received 3 pull requests\n"},
		{"Invalid pull request from the data manager",
			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
			http.StatusBadRequest, ""},
	}

	for _, tst := range testcases {
		response := IssueRequest(s.handler, &tst.req)
		ExpectStatusCode(c, tst.name, tst.responseCode, response)
		ExpectBody(c, tst.name, tst.responseBody, response)
	}

	// The Keep pull manager should have received one good list with 3
	// requests on it.
	for i := 0; i < 3; i++ {
		var item interface{}
		select {
		case item = <-pullq.NextItem:
		case <-time.After(time.Second):
			c.Error("timed out")
		}
		if _, ok := item.(PullRequest); !ok {
			c.Errorf("item %v could not be parsed as a PullRequest", item)
		}
	}

	expectChannelEmpty(c, pullq.NextItem)
}

// Test handling of the PUT /trash request.
//
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
// 1. Valid trash list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 2. Invalid trash list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 3. Valid trash list from the data manager
//    (expected result: 200 OK with request body "Received 3 trash
//    requests\n")
//
// 4. Invalid trash list from the data manager
//    (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good trash
// list with the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously. Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Replace the router's trashq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	trashq := NewWorkQueue()
	s.handler.Handler.(*router).trashq = trashq

	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	goodJSON := []byte(`[
		{
			"locator":"block1",
			"block_mtime":1409082153
		},
		{
			"locator":"block2",
			"block_mtime":1409082153
		},
		{
			"locator":"block3",
			"block_mtime":1409082153
		}
	]`)

	badJSON := []byte(`I am not a valid JSON string`)

	type trashTest struct {
		name         string
		req          RequestTester
		responseCode int
		responseBody string
	}

	var testcases = []trashTest{
		{"Valid trash list from an ordinary user",
			RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
			http.StatusUnauthorized, ""},
		{"Invalid trash list from an ordinary user",
			RequestTester{"/trash", userToken, "PUT", badJSON, ""},
			http.StatusUnauthorized, ""},
		{"Valid trash list from the data manager",
			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
			http.StatusOK, "Received 3 trash requests\n"},
		{"Invalid trash list from the data manager",
			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
			http.StatusBadRequest, ""},
	}

	for _, tst := range testcases {
		response := IssueRequest(s.handler, &tst.req)
		ExpectStatusCode(c, tst.name, tst.responseCode, response)
		ExpectBody(c, tst.name, tst.responseBody, response)
	}

	// The trash collector should have received one good list with 3
	// requests on it.
	for i := 0; i < 3; i++ {
		item := <-trashq.NextItem
		if _, ok := item.(TrashRequest); !ok {
			c.Errorf("item %v could not be parsed as a TrashRequest", item)
		}
	}

	expectChannelEmpty(c, trashq.NextItem)
}
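
// expectChannelEmpty (defined elsewhere in this package) asserts that no
// further items are pending on a work queue channel; conceptually (a
// sketch, not the actual implementation):
//
//	select {
//	case item := <-channel:
//		c.Errorf("unexpected item %+v on channel", item)
//	default:
//	}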

// ====================
// Helper functions
// ====================

// IssueRequest executes an HTTP request described by rt, to a
// REST router. It returns the HTTP response to the request.
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
	}
	if rt.storageClasses != "" {
		req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
	}
	handler.ServeHTTP(response, req)
	return response
}
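
// IssueHealthCheckRequest is like IssueRequest, except that it sends
// the token in a "Bearer" Authorization header, which is what the
// health-check endpoints expect.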
func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "Bearer "+rt.apiToken)
	}
	handler.ServeHTTP(response, req)
	return response
}

// ExpectStatusCode checks whether a response has the specified status code,
// and reports a test failure if not.
func ExpectStatusCode(
	c *check.C,
	testname string,
	expectedStatus int,
	response *httptest.ResponseRecorder) {
	c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
}

func ExpectBody(
	c *check.C,
	testname string,
	expectedBody string,
	response *httptest.ResponseRecorder) {
	if expectedBody != "" && response.Body.String() != expectedBody {
		c.Errorf("%s: expected response body '%s', got %+v",
			testname, expectedBody, response)
	}
}

func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	defer func(orig *bufferPool) {
		bufs = orig
	}(bufs)
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)

	ok := make(chan struct{})
	go func() {
		for i := 0; i < 2; i++ {
			response := IssueRequest(s.handler,
				&RequestTester{method: "PUT", uri: "/" + TestHash, requestBody: TestBlock})
			ExpectStatusCode(c,
				"TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
		}
		close(ok)
	}()

	select {
	case <-ok:
	case <-time.After(time.Second):
		c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
	}
}

// Invoke the PutBlockHandler a bunch of times to test for bufferpool
// resource leaks.
func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	ok := make(chan bool)
	go func() {
		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
			// Unauthenticated request, no server key
			// => OK (unsigned response)
			unsignedLocator := "/" + TestHash
			response := IssueRequest(s.handler,
				&RequestTester{method: "PUT", uri: unsignedLocator, requestBody: TestBlock})
			ExpectStatusCode(c,
				"TestPutHandlerBufferleak", http.StatusOK, response)
			ExpectBody(c,
				"TestPutHandlerBufferleak",
				TestHashPutResp, response)
		}
		ok <- true
	}()
	select {
	case <-ok:
	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
	}
}

func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
	s.cluster.Collections.BlobSigning = false
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	defer func(orig *bufferPool) {
		bufs = orig
	}(bufs)
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
	defer bufs.Put(bufs.Get(BlockSize))

	err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
	c.Assert(err, check.IsNil)

	resp := httptest.NewRecorder()
	ok := make(chan struct{})
	go func() {
		ctx, cancel := context.WithCancel(context.Background())
		req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
		cancel()
		s.handler.ServeHTTP(resp, req)
		close(ok)
	}()

	select {
	case <-ok:
	case <-time.After(20 * time.Second):
		c.Fatal("request took >20s, close notifier must be broken")
	}

	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
	for i, v := range s.handler.volmgr.AllWritable() {
		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
			c.Errorf("volume %d got %d calls, expected 0", i, calls)
		}
	}
}

// Invoke the GetBlockHandler a bunch of times to test for bufferpool
// resource leaks.
func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
		c.Error(err)
	}

	ok := make(chan bool)
	go func() {
		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
			// Unauthenticated request, unsigned locator
			// => OK
			unsignedLocator := "/" + TestHash
			response := IssueRequest(s.handler,
				&RequestTester{method: "GET", uri: unsignedLocator})
			ExpectStatusCode(c,
				"Unauthenticated request, unsigned locator", http.StatusOK, response)
			ExpectBody(c,
				"Unauthenticated request, unsigned locator",
				string(TestBlock), response)
		}
		ok <- true
	}()
	select {
	case <-ok:
	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
	}
}
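
// The X-Keep-Storage-Classes-Confirmed response header reports how many
// replicas were written in each storage class, e.g. "default=1"; the
// following test drives it through various requested-class combinations.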
func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	rt := RequestTester{
		method:      "PUT",
		uri:         "/" + TestHash,
		requestBody: TestBlock,
	}

	for _, trial := range []struct {
		ask    string
		expect string
	}{
		{"", ""},
		{"default", "default=1"},
		{" , default , default , ", "default=1"},
		{"special", "extra=1, special=1"},
		{"special, readonly", "extra=1, special=1"},
		{"special, nonexistent", "extra=1, special=1"},
		{"extra, special", "extra=1, special=1"},
		{"default, special", "default=1, extra=1, special=1"},
	} {
		c.Logf("success case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		if trial.expect == "" {
			// any non-empty value is correct
			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
		} else {
			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
		}
	}

	for _, trial := range []struct {
		ask string
	}{
		{"doesnotexist"},
		{"doesnotexist, readonly"},
		{"readonly"},
	} {
		c.Logf("failure case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
	}
}
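
// sortCommaSeparated normalizes a comma-separated list so header values
// can be compared regardless of order, e.g.
// sortCommaSeparated("special=1, extra=1") returns "extra=1, special=1".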
func sortCommaSeparated(s string) string {
	slice := strings.Split(s, ", ")
	sort.Strings(slice)
	return strings.Join(slice, ", ")
}

func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	resp := IssueRequest(s.handler, &RequestTester{
		method:      "PUT",
		uri:         "/" + TestHash,
		requestBody: TestBlock,
	})
	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
}

func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Set up Keep volumes
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	// unauthenticatedReq => UnauthorizedError
	unauthenticatedReq := &RequestTester{
		method: "PUT",
		uri:    "/untrash/" + TestHash,
	}
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"Unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// notDataManagerReq => UnauthorizedError
	notDataManagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: knownToken,
	}

	response = IssueRequest(s.handler, notDataManagerReq)
	ExpectStatusCode(c,
		"Non-datamanager token",
		UnauthorizedError.HTTPCode,
		response)

	// datamanagerWithBadHashReq => StatusBadRequest
	datamanagerWithBadHashReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/thisisnotalocator",
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
	ExpectStatusCode(c,
		"Bad locator in untrash request",
		http.StatusBadRequest,
		response)

	// datamanagerWrongMethodReq => StatusMethodNotAllowed
	datamanagerWrongMethodReq := &RequestTester{
		method:   "GET",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
	ExpectStatusCode(c,
		"Only PUT method is supported for untrash",
		http.StatusMethodNotAllowed,
		response)

	// datamanagerReq => StatusOK
	datamanagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerReq)
	ExpectStatusCode(c,
		"datamanager request",
		http.StatusOK,
		response)
	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
}

func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
	// Change all volumes to read-only
	for uuid, v := range s.cluster.Volumes {
		v.ReadOnly = true
		s.cluster.Volumes[uuid] = v
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// datamanagerReq => StatusNotFound
	datamanagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response := IssueRequest(s.handler, datamanagerReq)
	ExpectStatusCode(c,
		"No writable volumes",
		http.StatusNotFound,
		response)
}

func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
	s.cluster.ManagementToken = arvadostest.ManagementToken
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	pingReq := &RequestTester{
		method:   "GET",
		uri:      "/_health/ping",
		apiToken: arvadostest.ManagementToken,
	}
	response := IssueHealthCheckRequest(s.handler, pingReq)
	c.Check(response.Code, check.Equals, http.StatusOK)
	want := `{"health":"OK"}`
	if !strings.Contains(response.Body.String(), want) {
		c.Errorf("expected response to include %s: got %s", want, response.Body.String())
	}
}