// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Tests for Keep HTTP handlers:
//
// The HTTP handlers are responsible for enforcing permission policy,
// so these tests must exercise all possible permission permutations.
29 "git.arvados.org/arvados.git/lib/config"
30 "git.arvados.org/arvados.git/sdk/go/arvados"
31 "git.arvados.org/arvados.git/sdk/go/arvadostest"
32 "git.arvados.org/arvados.git/sdk/go/ctxlog"
33 "github.com/prometheus/client_golang/prometheus"
34 check "gopkg.in/check.v1"

var testServiceURL = func() arvados.URL {
	return arvados.URL{Host: "localhost:12345", Scheme: "http"}
}()

func testCluster(t TB) *arvados.Cluster {
	cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
	if err != nil {
		t.Fatal(err)
	}
	cluster, err := cfg.GetCluster("")
	if err != nil {
		t.Fatal(err)
	}
	cluster.SystemRootToken = arvadostest.SystemRootToken
	cluster.ManagementToken = arvadostest.ManagementToken
	cluster.Collections.BlobSigning = false
	return cluster
}

var _ = check.Suite(&HandlerSuite{})

type HandlerSuite struct {
	cluster *arvados.Cluster
	handler *handler
}

func (s *HandlerSuite) SetUpTest(c *check.C) {
	s.cluster = testCluster(c)
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
	}
	s.handler = &handler{}
}

// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
type RequestTester struct {
	uri            string
	apiToken       string
	method         string
	requestBody    []byte
	storageClasses string
}
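
// For illustration, a typical use with the IssueRequest helper defined
// near the end of this file (knownToken is the non-superuser test
// token used throughout these tests):
//
//	response := IssueRequest(s.handler, &RequestTester{
//		method:   "GET",
//		uri:      "/" + TestHash,
//		apiToken: knownToken,
//	})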

// Test GetBlockHandler on the following situations:
// - permissions off, unauthenticated request, unsigned locator
// - permissions on, authenticated request, signed locator
// - permissions on, authenticated request, unsigned locator
// - permissions on, unauthenticated request, signed locator
// - permissions on, authenticated request, expired locator
// - permissions on, authenticated request, signed locator, transient error from backend
func (s *HandlerSuite) TestGetHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	err := vols[0].Put(context.Background(), TestHash, TestBlock)
	c.Check(err, check.IsNil)

	// Create locators for testing.
	// Turn on permission settings so we can generate signed locators.
	s.cluster.Collections.BlobSigning = true
	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	var (
		unsignedLocator  = "/" + TestHash
		validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
		expiredTimestamp = time.Now().Add(-time.Hour)
		signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
		expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
	)
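
	// SignLocator appends a "+A<signature>@<hex-expiry>" permission
	// hint to the bare hash (the standard Keep signed-locator format),
	// so signedLocator and expiredLocator differ from unsignedLocator
	// only in that hint.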

	// Test unauthenticated request with permissions off.
	s.cluster.Collections.BlobSigning = false

	// Unauthenticated request, unsigned locator
	// => OK
	response := IssueRequest(s.handler,
		&RequestTester{
			method: "GET",
			uri:    unsignedLocator,
		})
	ExpectStatusCode(c,
		"Unauthenticated request, unsigned locator", http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated request, unsigned locator",
		string(TestBlock),
		response)

	receivedLen := response.Header().Get("Content-Length")
	expectedLen := fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
	}

	// Permissions: on.
	s.cluster.Collections.BlobSigning = true

	// Authenticated request, signed locator
	// => OK
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      signedLocator,
		apiToken: knownToken,
	})
	ExpectStatusCode(c,
		"Authenticated request, signed locator", http.StatusOK, response)
	ExpectBody(c,
		"Authenticated request, signed locator", string(TestBlock), response)

	receivedLen = response.Header().Get("Content-Length")
	expectedLen = fmt.Sprintf("%d", len(TestBlock))
	if receivedLen != expectedLen {
		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
	}

	// Authenticated request, unsigned locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      unsignedLocator,
		apiToken: knownToken,
	})
	ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)

	// Unauthenticated request, signed locator
	// => PermissionError
	response = IssueRequest(s.handler, &RequestTester{
		method: "GET",
		uri:    signedLocator,
	})
	ExpectStatusCode(c,
		"Unauthenticated request, signed locator",
		PermissionError.HTTPCode, response)

	// Authenticated request, expired locator
	// => ExpiredError
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      expiredLocator,
		apiToken: knownToken,
	})
	ExpectStatusCode(c,
		"Authenticated request, expired locator",
		ExpiredError.HTTPCode, response)

	// Authenticated request, signed locator
	// => 503 Server busy (transient error)

	// Set up the block owning volume to respond with errors
	vols[0].Volume.(*MockVolume).Bad = true
	vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
	response = IssueRequest(s.handler, &RequestTester{
		method:   "GET",
		uri:      signedLocator,
		apiToken: knownToken,
	})
	// A transient error from one volume while the other doesn't find the block
	// should make the service return a 503 so that clients can retry.
	ExpectStatusCode(c,
		"Volume backend busy",
		http.StatusServiceUnavailable, response)
}

// Test PutBlockHandler on the following situations:
// - no server key
// - with server key, authenticated request, unsigned locator
// - with server key, unauthenticated request, unsigned locator
func (s *HandlerSuite) TestPutHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// --------------
	// No server key.

	s.cluster.Collections.BlobSigningKey = ""

	// Unauthenticated request, no server key
	// => OK (unsigned response)
	unsignedLocator := "/" + TestHash
	response := IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated request, no server key", http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated request, no server key",
		TestHashPutResp, response)
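
	// (TestHashPutResp, defined with the other fixtures in this
	// package, is expected to be the bare "hash+size" locator plus a
	// trailing newline -- the unsigned form, since no signing key is
	// configured here.)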

	// ------------------
	// With a server key.

	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	// When a permission key is available, the locator returned
	// from an authenticated PUT request will be signed.

	// Authenticated PUT, signed locator
	// => OK (signed response)
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
			apiToken:    knownToken,
		})

	ExpectStatusCode(c,
		"Authenticated PUT, signed locator, with server key",
		http.StatusOK, response)
	responseLocator := strings.TrimSpace(response.Body.String())
	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
			"response '%s' does not contain a valid signature",
			responseLocator)
	}

	// Unauthenticated PUT, unsigned locator
	// => OK
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		TestHashPutResp, response)
}

func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	s.cluster.SystemRootToken = "fake-data-manager-token"
	IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
		})

	s.cluster.Collections.BlobTrash = true
	IssueRequest(s.handler,
		&RequestTester{
			method:      "DELETE",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
			apiToken:    s.cluster.SystemRootToken,
		})
	type expect struct {
		volid     string
		method    string
		callcount int
	}
	for _, e := range []expect{
		{"zzzzz-nyw5e-000000000000000", "Get", 0},
		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
		{"zzzzz-nyw5e-000000000000000", "Put", 0},
		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
		{"zzzzz-nyw5e-111111111111111", "Get", 0},
		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
		{"zzzzz-nyw5e-111111111111111", "Put", 1},
		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
	} {
		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
		}
	}
}

func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-111111111111111": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class1": true}},
		"zzzzz-nyw5e-222222222222222": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class2": true, "class3": true}},
	}

	for _, trial := range []struct {
		priority1 int // priority of class1, thus vol1
		priority2 int // priority of class2
		priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
		get1      int // expected number of "get" ops on vol1
		get2      int // expected number of "get" ops on vol2
	}{
		{100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
		{100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
		{66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
		{66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
	} {
		c.Logf("%+v", trial)
		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
			"class1": {Priority: trial.priority1},
			"class2": {Priority: trial.priority2},
			"class3": {Priority: trial.priority3},
		}
		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
		IssueRequest(s.handler,
			&RequestTester{
				method:         "PUT",
				uri:            "/" + TestHash,
				requestBody:    TestBlock,
				storageClasses: "class1",
			})
		IssueRequest(s.handler,
			&RequestTester{
				method: "GET",
				uri:    "/" + TestHash,
			})
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
	}
}

func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-111111111111111": {
			Driver:         "mock",
			Replication:    1,
			ReadOnly:       true,
			StorageClasses: map[string]bool{"class1": true}},
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	resp := IssueRequest(s.handler,
		&RequestTester{
			method:         "PUT",
			uri:            "/" + TestHash,
			requestBody:    TestBlock,
			storageClasses: "class1",
		})
	c.Check(resp.Code, check.Equals, FullError.HTTPCode)
	c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
}

func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-111111111111111": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class1": true}},
		"zzzzz-nyw5e-121212121212121": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class1": true, "class2": true}},
		"zzzzz-nyw5e-222222222222222": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class2": true}},
	}

	for _, trial := range []struct {
		setCounter uint32 // value to stuff vm.counter, to control offset
		classes    string // desired classes
		put111     int    // expected number of "put" ops on 11111... after 2x put reqs
		put121     int    // expected number of "put" ops on 12121...
		put222     int    // expected number of "put" ops on 22222...
		cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
		cmp121     int    // expected number of "compare" ops on 12121...
		cmp222     int    // expected number of "compare" ops on 22222...
	}{
		{0, "class1",
			1, 0, 0,
			2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 111
		{0, "class2",
			0, 1, 0,
			0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
		{0, "class1,class2",
			1, 1, 0,
			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
		{1, "class1,class2",
			0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
		{0, "class1,class2,class404",
			1, 1, 0,
			2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
	} {
		c.Logf("%+v", trial)
		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
			"class1": {},
			"class2": {},
			"class3": {},
		}
		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
		atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
		for i := 0; i < 2; i++ {
			IssueRequest(s.handler,
				&RequestTester{
					method:         "PUT",
					uri:            "/" + TestHash,
					requestBody:    TestBlock,
					storageClasses: trial.classes,
				})
		}
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
	}
}

// Test TOUCH requests.
func (s *HandlerSuite) TestTouchHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
	afterPut := time.Now()
	t, err := vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.Before(afterPut), check.Equals, true)
469 "touch with no credentials",
470 http.StatusUnauthorized,
471 IssueRequest(s.handler, &RequestTester{
477 "touch with non-root credentials",
478 http.StatusUnauthorized,
479 IssueRequest(s.handler, &RequestTester{
482 apiToken: arvadostest.ActiveTokenV2,
486 "touch non-existent block",
488 IssueRequest(s.handler, &RequestTester{
490 uri: "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
491 apiToken: s.cluster.SystemRootToken,

	beforeTouch := time.Now()
	ExpectStatusCode(c,
		"touch block",
		http.StatusOK,
		IssueRequest(s.handler, &RequestTester{
			method:   "TOUCH",
			uri:      "/" + TestHash,
			apiToken: s.cluster.SystemRootToken,
		}))
	t, err = vols[0].Mtime(TestHash)
	c.Assert(err, check.IsNil)
	c.Assert(t.After(beforeTouch), check.Equals, true)
}

// Test /index requests:
// - unauthenticated /index request
// - unauthenticated /index/prefix request
// - authenticated /index request | non-superuser
// - authenticated /index/prefix request | non-superuser
// - authenticated /index request | superuser
// - authenticated /index/prefix request | superuser
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Include multiple blocks on different volumes, and
	// some metadata files (which should be omitted from index listings)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[1].Put(context.Background(), TestHash2, TestBlock2)
	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
	vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	unauthenticatedReq := &RequestTester{
		method: "GET",
		uri:    "/index",
	}
	authenticatedReq := &RequestTester{
		method:   "GET",
		uri:      "/index",
		apiToken: knownToken,
	}
	superuserReq := &RequestTester{
		method:   "GET",
		uri:      "/index",
		apiToken: s.cluster.SystemRootToken,
	}
	unauthPrefixReq := &RequestTester{
		method: "GET",
		uri:    "/index/" + TestHash[0:3],
	}
	authPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/" + TestHash[0:3],
		apiToken: knownToken,
	}
	superuserPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/" + TestHash[0:3],
		apiToken: s.cluster.SystemRootToken,
	}
	superuserNoSuchPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/abcd",
		apiToken: s.cluster.SystemRootToken,
	}
	superuserInvalidPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/xyz",
		apiToken: s.cluster.SystemRootToken,
	}

	// -------------------------------------------------------------
	// Only the superuser should be allowed to issue /index requests.

	// ---------------------------
	// BlobSigning enabled
	// This setting should not affect tests passing.
	s.cluster.Collections.BlobSigning = true

	// unauthenticated /index request
	// => UnauthorizedError
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"permissions on, unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// unauthenticated /index/prefix request
	// => UnauthorizedError
	response = IssueRequest(s.handler, unauthPrefixReq)
	ExpectStatusCode(c,
		"permissions on, unauthenticated /index/prefix request",
		UnauthorizedError.HTTPCode,
		response)

	// authenticated /index request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authenticatedReq)
	ExpectStatusCode(c,
		"permissions on, authenticated request, non-superuser",
		UnauthorizedError.HTTPCode,
		response)

	// authenticated /index/prefix request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authPrefixReq)
	ExpectStatusCode(c,
		"permissions on, authenticated /index/prefix request, non-superuser",
		UnauthorizedError.HTTPCode,
		response)

	// superuser /index request
	// => OK
	response = IssueRequest(s.handler, superuserReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	// ----------------------------
	// BlobSigning disabled
	// A valid request should still pass.
	s.cluster.Collections.BlobSigning = false

	// superuser /index request
	// => OK
	response = IssueRequest(s.handler, superuserReq)
	ExpectStatusCode(c,
		"permissions off, superuser request",
		http.StatusOK,
		response)

	expected := `^` + TestHash + `\+\d+ \d+\n` +
		TestHash2 + `\+\d+ \d+\n\n$`
	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
		"permissions off, superuser request"))

	// superuser /index/prefix request
	// => OK
	response = IssueRequest(s.handler, superuserPrefixReq)
	ExpectStatusCode(c,
		"permissions off, superuser /index/prefix request",
		http.StatusOK,
		response)

	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
		"permissions off, superuser /index/prefix request"))

	// superuser /index/{no-such-prefix} request
	// => OK
	response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
	ExpectStatusCode(c,
		"permissions off, superuser /index/{no-such-prefix} request",
		http.StatusOK,
		response)

	if response.Body.String() != "\n" {
		c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
	}

	// superuser /index/{invalid-prefix} request
	// => StatusBadRequest
	response = IssueRequest(s.handler, superuserInvalidPrefixReq)
	ExpectStatusCode(c,
		"permissions off, superuser /index/{invalid-prefix} request",
		http.StatusBadRequest,
		response)
}

// Test /DELETE requests:
//
// With no token and with a non-data-manager token:
// * Delete existing block
//   (test for 403 Forbidden, confirm block not deleted)
//
// With data manager token:
//
// * Delete existing block
//   (test for 200 OK, response counts, confirm block deleted)
//
// * Delete nonexistent block
//   (test for 200 OK, response counts)
//
// TODO(twp):
//
// * Delete block on read-only and read-write volume
//   (test for 200 OK, response with copies_deleted=1,
//   copies_failed=1, confirm block deleted only on r/w volume)
//
// * Delete block on read-only volume only
//   (test for 200 OK, response with copies_deleted=0, copies_failed=1,
//   confirm block not deleted)
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	// Explicitly set the BlobSigningTTL to 0 for these
	// tests, to ensure the MockVolume deletes the blocks
	// even though they have just been created.
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)

	var userToken = "NOT DATA MANAGER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	s.cluster.Collections.BlobTrash = true

	unauthReq := &RequestTester{
		method: "DELETE",
		uri:    "/" + TestHash,
	}

	userReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash,
		apiToken: userToken,
	}

	superuserExistingBlockReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}

	superuserNonexistentBlockReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash2,
		apiToken: s.cluster.SystemRootToken,
	}

	// Unauthenticated request returns PermissionError.
	var response *httptest.ResponseRecorder
	response = IssueRequest(s.handler, unauthReq)
	ExpectStatusCode(c,
		"unauthenticated request",
		PermissionError.HTTPCode,
		response)

	// Authenticated non-admin request returns PermissionError.
	response = IssueRequest(s.handler, userReq)
	ExpectStatusCode(c,
		"authenticated non-admin request",
		PermissionError.HTTPCode,
		response)

	// Authenticated admin request for nonexistent block.
	type deletecounter struct {
		Deleted int `json:"copies_deleted"`
		Failed  int `json:"copies_failed"`
	}
	var responseDc, expectedDc deletecounter

	response = IssueRequest(s.handler, superuserNonexistentBlockReq)
	ExpectStatusCode(c,
		"data manager request, nonexistent block",
		http.StatusNotFound,
		response)

	// Authenticated admin request for existing block while BlobTrash is false.
	s.cluster.Collections.BlobTrash = false
	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"authenticated request, existing block, method disabled",
		MethodDisabledError.HTTPCode,
		response)
	s.cluster.Collections.BlobTrash = true

	// Authenticated admin request for existing block.
	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"data manager request, existing block",
		http.StatusOK,
		response)
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	}
	// Confirm the block has been deleted
	buf := make([]byte, BlockSize)
	_, err := vols[0].Get(context.Background(), TestHash, buf)
	var blockDeleted = os.IsNotExist(err)
	if !blockDeleted {
		c.Error("superuserExistingBlockReq: block not deleted")
	}

	// A DELETE request on a block newer than BlobSigningTTL
	// should return success but leave the block on the volume.
	vols[0].Put(context.Background(), TestHash, TestBlock)
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)

	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"data manager request, existing block",
		http.StatusOK,
		response)
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	}
	// Confirm the block has NOT been deleted.
	_, err = vols[0].Get(context.Background(), TestHash, buf)
	if err != nil {
		c.Errorf("testing delete on new block: %s\n", err)
	}
}

// Test handling of the PUT /pull request.
//
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
// 1. Valid pull list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 2. Invalid pull request from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 3. Valid pull request from the data manager
//    (expected result: 200 OK with request body "Received 3 pull
//    requests")
//
// 4. Invalid pull request from the data manager
//    (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously. Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
func (s *HandlerSuite) TestPullHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Replace the router's pullq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	pullq := NewWorkQueue()
	s.handler.Handler.(*router).pullq = pullq

	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	goodJSON := []byte(`[
		{
			"locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
			"servers":[
				"http://server1",
				"http://server2"
			]
		},
		{
			"locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
			"servers":[]
		},
		{
			"locator":"cccccccccccccccccccccccccccccccc+12345",
			"servers":["http://server1"]
		}
	]`)

	badJSON := []byte(`{ "key":"I'm a little teapot" }`)

	type pullTest struct {
		name         string
		req          RequestTester
		responseCode int
		responseBody string
	}
	var testcases = []pullTest{
881 "Valid pull list from an ordinary user",
882 RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
883 http.StatusUnauthorized,
887 "Invalid pull request from an ordinary user",
888 RequestTester{"/pull", userToken, "PUT", badJSON, ""},
889 http.StatusUnauthorized,
893 "Valid pull request from the data manager",
894 RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
896 "Received 3 pull requests\n",
899 "Invalid pull request from the data manager",
900 RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
901 http.StatusBadRequest,
906 for _, tst := range testcases {
907 response := IssueRequest(s.handler, &tst.req)
908 ExpectStatusCode(c, tst.name, tst.responseCode, response)
909 ExpectBody(c, tst.name, tst.responseBody, response)

	// The Keep pull manager should have received one good list with 3
	// pull requests.
	for i := 0; i < 3; i++ {
		var item interface{}
		select {
		case item = <-pullq.NextItem:
		case <-time.After(time.Second):
			c.Error("timed out")
		}
		if _, ok := item.(PullRequest); !ok {
			c.Errorf("item %v could not be parsed as a PullRequest", item)
		}
	}

	expectChannelEmpty(c, pullq.NextItem)
}

// Test handling of the PUT /trash request.
//
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
// 1. Valid trash list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 2. Invalid trash list from an ordinary user
//    (expected result: 401 Unauthorized)
//
// 3. Valid trash list from the data manager
//    (expected result: 200 OK with request body "Received 3 trash
//    requests")
//
// 4. Invalid trash list from the data manager
//    (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good trash list
// with the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously. Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	// Replace the router's trashq -- which the worker goroutines
	// started by setup() are now receiving from -- with a new
	// one, so we can see what the handler sends to it.
	trashq := NewWorkQueue()
	s.handler.Handler.(*router).trashq = trashq

	var userToken = "USER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	goodJSON := []byte(`[
		{
			"locator":"block1",
			"block_mtime":1409082153
		},
		{
			"locator":"block2",
			"block_mtime":1409082153
		},
		{
			"locator":"block3",
			"block_mtime":1409082153
		}
	]`)

	badJSON := []byte(`I am not a valid JSON string`)

	type trashTest struct {
		name         string
		req          RequestTester
		responseCode int
		responseBody string
	}
	var testcases = []trashTest{
992 "Valid trash list from an ordinary user",
993 RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
994 http.StatusUnauthorized,
998 "Invalid trash list from an ordinary user",
999 RequestTester{"/trash", userToken, "PUT", badJSON, ""},
1000 http.StatusUnauthorized,
1004 "Valid trash list from the data manager",
1005 RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
1007 "Received 3 trash requests\n",
1010 "Invalid trash list from the data manager",
1011 RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
1012 http.StatusBadRequest,
1017 for _, tst := range testcases {
1018 response := IssueRequest(s.handler, &tst.req)
1019 ExpectStatusCode(c, tst.name, tst.responseCode, response)
1020 ExpectBody(c, tst.name, tst.responseBody, response)

	// The trash collector should have received one good list with 3
	// requests.
	for i := 0; i < 3; i++ {
		item := <-trashq.NextItem
		if _, ok := item.(TrashRequest); !ok {
			c.Errorf("item %v could not be parsed as a TrashRequest", item)
		}
	}

	expectChannelEmpty(c, trashq.NextItem)
}

// ====================
// Helper functions
// ====================

// IssueRequest executes an HTTP request described by rt, to a
// REST router. It returns the HTTP response to the request.
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
	}
	if rt.storageClasses != "" {
		req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
	}
	handler.ServeHTTP(response, req)
	return response
}
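
// IssueHealthCheckRequest is like IssueRequest, except that it sends
// the token in a "Bearer" Authorization header (as the health check
// endpoint expects) and sets no storage-class header.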
func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "Bearer "+rt.apiToken)
	}
	handler.ServeHTTP(response, req)
	return response
}

// ExpectStatusCode checks whether a response has the specified status code,
// and reports a test failure if not.
func ExpectStatusCode(
	c *check.C,
	testname string,
	expectedStatus int,
	response *httptest.ResponseRecorder) {
	c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
}

// ExpectBody checks whether a response body matches the expected body,
// and reports a test failure if not.
func ExpectBody(
	c *check.C,
	testname string,
	expectedBody string,
	response *httptest.ResponseRecorder) {
	if expectedBody != "" && response.Body.String() != expectedBody {
		c.Errorf("%s: expected response body '%s', got %+v",
			testname, expectedBody, response)
	}
}

// Test that PUT requests can succeed when only one buffer is available.
func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	defer func(orig *bufferPool) {
		bufs = orig
	}(bufs)
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)

	ok := make(chan struct{})
	go func() {
		for i := 0; i < 2; i++ {
			response := IssueRequest(s.handler,
				&RequestTester{
					method:      "PUT",
					uri:         "/" + TestHash,
					requestBody: TestBlock,
				})
			ExpectStatusCode(c,
				"TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
		}
		ok <- struct{}{}
	}()

	select {
	case <-ok:
	case <-time.After(time.Second):
		c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
	}
}

// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
// leak.
func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	ok := make(chan bool)
	go func() {
		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
			// Unauthenticated request, no server key
			// => OK (unsigned response)
			unsignedLocator := "/" + TestHash
			response := IssueRequest(s.handler,
				&RequestTester{
					method:      "PUT",
					uri:         unsignedLocator,
					requestBody: TestBlock,
				})
			ExpectStatusCode(c,
				"TestPutHandlerBufferleak", http.StatusOK, response)
			ExpectBody(c,
				"TestPutHandlerBufferleak",
				TestHashPutResp, response)
		}
		ok <- true
	}()
	select {
	case <-ok:
	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
	}
}

func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
	s.cluster.Collections.BlobSigning = false
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	defer func(orig *bufferPool) {
		bufs = orig
	}(bufs)
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
	defer bufs.Put(bufs.Get(BlockSize))
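	// The deferred Get/Put above takes the pool's only buffer and
	// holds it until this test returns, so the GET below can never
	// acquire a buffer: it should give up when the client
	// disconnects, without reading from any volume.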

	err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
	c.Assert(err, check.IsNil)

	resp := httptest.NewRecorder()
	ok := make(chan struct{})
	go func() {
		ctx, cancel := context.WithCancel(context.Background())
		req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
		cancel()
		s.handler.ServeHTTP(resp, req)
		ok <- struct{}{}
	}()

	select {
	case <-ok:
	case <-time.After(20 * time.Second):
		c.Fatal("request took >20s, close notifier must be broken")
	}

	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
	for i, v := range s.handler.volmgr.AllWritable() {
		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
			c.Errorf("volume %d got %d calls, expected 0", i, calls)
		}
	}
}

// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
// leak.
func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
		c.Error(err)
	}

	ok := make(chan bool)
	go func() {
		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
			// Unauthenticated request, unsigned locator
			// => OK
			unsignedLocator := "/" + TestHash
			response := IssueRequest(s.handler,
				&RequestTester{
					method: "GET",
					uri:    unsignedLocator,
				})
			ExpectStatusCode(c,
				"Unauthenticated request, unsigned locator", http.StatusOK, response)
			ExpectBody(c,
				"Unauthenticated request, unsigned locator",
				string(TestBlock),
				response)
		}
		ok <- true
	}()
	select {
	case <-ok:
	case <-time.After(20 * time.Second):
		// If the buffer pool leaks, the test goroutine hangs.
		c.Fatal("test did not finish, assuming pool leaked")
	}
}

func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	rt := RequestTester{
		method:      "PUT",
		uri:         "/" + TestHash,
		requestBody: TestBlock,
	}

	for _, trial := range []struct {
		ask    string
		expect string
	}{
		{"", ""},
		{"default", "default=1"},
		{" , default , default , ", "default=1"},
		{"special", "extra=1, special=1"},
		{"special, readonly", "extra=1, special=1"},
		{"special, nonexistent", "extra=1, special=1"},
		{"extra, special", "extra=1, special=1"},
		{"default, special", "default=1, extra=1, special=1"},
	} {
		c.Logf("success case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		if trial.expect == "" {
			// any non-empty value is correct
			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
		} else {
			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
		}
	}

	for _, trial := range []struct {
		ask string
	}{
		{"doesnotexist"},
		{"doesnotexist, readonly"},
		{"readonly"},
	} {
		c.Logf("failure case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
	}
}

// sortCommaSeparated sorts the ", "-separated fields of s and returns
// them re-joined in the same format.
func sortCommaSeparated(s string) string {
	slice := strings.Split(s, ", ")
	sort.Strings(slice)
	return strings.Join(slice, ", ")
}
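
// For example, sortCommaSeparated("special=1, extra=1") returns
// "extra=1, special=1", matching the alphabetically ordered
// expectations in TestPutStorageClasses above.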

func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	resp := IssueRequest(s.handler, &RequestTester{
		method:      "PUT",
		uri:         "/" + TestHash,
		requestBody: TestBlock,
	})
	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
}

func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Set up Keep volumes
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	// unauthenticatedReq => UnauthorizedError
	unauthenticatedReq := &RequestTester{
		method: "PUT",
		uri:    "/untrash/" + TestHash,
	}
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"Unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// notDataManagerReq => UnauthorizedError
	notDataManagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: knownToken,
	}

	response = IssueRequest(s.handler, notDataManagerReq)
	ExpectStatusCode(c,
		"Non-datamanager token",
		UnauthorizedError.HTTPCode,
		response)

	// datamanagerWithBadHashReq => StatusBadRequest
	datamanagerWithBadHashReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/thisisnotalocator",
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
	ExpectStatusCode(c,
		"Bad locator in untrash request",
		http.StatusBadRequest,
		response)

	// datamanagerWrongMethodReq => StatusMethodNotAllowed
	datamanagerWrongMethodReq := &RequestTester{
		method:   "GET",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
	ExpectStatusCode(c,
		"Only PUT method is supported for untrash",
		http.StatusMethodNotAllowed,
		response)

	// datamanagerReq => StatusOK
	datamanagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerReq)
	ExpectStatusCode(c,
		"Untrash request",
		http.StatusOK,
		response)
	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
}

func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
	// Change all volumes to read-only
	for uuid, v := range s.cluster.Volumes {
		v.ReadOnly = true
		s.cluster.Volumes[uuid] = v
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// datamanagerReq => StatusNotFound (no writable volumes)
	datamanagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response := IssueRequest(s.handler, datamanagerReq)
	ExpectStatusCode(c,
		"No writable volumes",
		http.StatusNotFound,
		response)
}

func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
	s.cluster.ManagementToken = arvadostest.ManagementToken
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	pingReq := &RequestTester{
		method:   "GET",
		uri:      "/_health/ping",
		apiToken: arvadostest.ManagementToken,
	}
	response := IssueHealthCheckRequest(s.handler, pingReq)
	ExpectStatusCode(c,
		"health check ping",
		http.StatusOK,
		response)
	want := `{"health":"OK"}`
	if !strings.Contains(response.Body.String(), want) {
		c.Errorf("expected response to include %s: got %s", want, response.Body.String())
	}
}