Merge branch 'patch-1' of https://github.com/mr-c/arvados into mr-c-patch-1
[arvados.git] / services / keepstore / handler_test.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 // Tests for Keep HTTP handlers:
6 //
7 //     GetBlockHandler
8 //     PutBlockHandler
9 //     IndexHandler
10 //
11 // The HTTP handlers are responsible for enforcing permission policy,
12 // so these tests must exercise all possible permission permutations.
13
14 package main
15
16 import (
17         "bytes"
18         "context"
19         "encoding/json"
20         "fmt"
21         "net/http"
22         "net/http/httptest"
23         "os"
24         "regexp"
25         "strings"
26         "time"
27
28         "git.arvados.org/arvados.git/lib/config"
29         "git.arvados.org/arvados.git/sdk/go/arvados"
30         "git.arvados.org/arvados.git/sdk/go/arvadostest"
31         "git.arvados.org/arvados.git/sdk/go/ctxlog"
32         "github.com/prometheus/client_golang/prometheus"
33         check "gopkg.in/check.v1"
34 )
35
36 var testServiceURL = func() arvados.URL {
37         return arvados.URL{Host: "localhost:12345", Scheme: "http"}
38 }()
39
40 func testCluster(t TB) *arvados.Cluster {
41         cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
42         if err != nil {
43                 t.Fatal(err)
44         }
45         cluster, err := cfg.GetCluster("")
46         if err != nil {
47                 t.Fatal(err)
48         }
49         cluster.SystemRootToken = arvadostest.SystemRootToken
50         cluster.ManagementToken = arvadostest.ManagementToken
51         cluster.Collections.BlobSigning = false
52         return cluster
53 }
54
var _ = check.Suite(&HandlerSuite{})

// HandlerSuite holds per-test state for the keepstore HTTP handler
// tests. SetUpTest resets both fields before every test.
type HandlerSuite struct {
	cluster *arvados.Cluster // cluster config passed to s.handler.setup by each test
	handler *handler         // handler under test; each test initializes it via setup()
}
61
62 func (s *HandlerSuite) SetUpTest(c *check.C) {
63         s.cluster = testCluster(c)
64         s.cluster.Volumes = map[string]arvados.Volume{
65                 "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
66                 "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
67         }
68         s.handler = &handler{}
69 }
70
// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
type RequestTester struct {
	uri         string // request path, e.g. "/" + locator, "/index", "/pull"
	apiToken    string // API token to present with the request; empty = unauthenticated
	method      string // HTTP method: "GET", "PUT", "DELETE", "TOUCH", ...
	requestBody []byte // request body (used by PUT and list-upload requests)
}
79
80 // Test GetBlockHandler on the following situations:
81 //   - permissions off, unauthenticated request, unsigned locator
82 //   - permissions on, authenticated request, signed locator
83 //   - permissions on, authenticated request, unsigned locator
84 //   - permissions on, unauthenticated request, signed locator
85 //   - permissions on, authenticated request, expired locator
86 //   - permissions on, authenticated request, signed locator, transient error from backend
87 //
88 func (s *HandlerSuite) TestGetHandler(c *check.C) {
89         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
90
91         vols := s.handler.volmgr.AllWritable()
92         err := vols[0].Put(context.Background(), TestHash, TestBlock)
93         c.Check(err, check.IsNil)
94
95         // Create locators for testing.
96         // Turn on permission settings so we can generate signed locators.
97         s.cluster.Collections.BlobSigning = true
98         s.cluster.Collections.BlobSigningKey = knownKey
99         s.cluster.Collections.BlobSigningTTL.Set("5m")
100
101         var (
102                 unsignedLocator  = "/" + TestHash
103                 validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
104                 expiredTimestamp = time.Now().Add(-time.Hour)
105                 signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
106                 expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
107         )
108
109         // -----------------
110         // Test unauthenticated request with permissions off.
111         s.cluster.Collections.BlobSigning = false
112
113         // Unauthenticated request, unsigned locator
114         // => OK
115         response := IssueRequest(s.handler,
116                 &RequestTester{
117                         method: "GET",
118                         uri:    unsignedLocator,
119                 })
120         ExpectStatusCode(c,
121                 "Unauthenticated request, unsigned locator", http.StatusOK, response)
122         ExpectBody(c,
123                 "Unauthenticated request, unsigned locator",
124                 string(TestBlock),
125                 response)
126
127         receivedLen := response.Header().Get("Content-Length")
128         expectedLen := fmt.Sprintf("%d", len(TestBlock))
129         if receivedLen != expectedLen {
130                 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
131         }
132
133         // ----------------
134         // Permissions: on.
135         s.cluster.Collections.BlobSigning = true
136
137         // Authenticated request, signed locator
138         // => OK
139         response = IssueRequest(s.handler, &RequestTester{
140                 method:   "GET",
141                 uri:      signedLocator,
142                 apiToken: knownToken,
143         })
144         ExpectStatusCode(c,
145                 "Authenticated request, signed locator", http.StatusOK, response)
146         ExpectBody(c,
147                 "Authenticated request, signed locator", string(TestBlock), response)
148
149         receivedLen = response.Header().Get("Content-Length")
150         expectedLen = fmt.Sprintf("%d", len(TestBlock))
151         if receivedLen != expectedLen {
152                 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
153         }
154
155         // Authenticated request, unsigned locator
156         // => PermissionError
157         response = IssueRequest(s.handler, &RequestTester{
158                 method:   "GET",
159                 uri:      unsignedLocator,
160                 apiToken: knownToken,
161         })
162         ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
163
164         // Unauthenticated request, signed locator
165         // => PermissionError
166         response = IssueRequest(s.handler, &RequestTester{
167                 method: "GET",
168                 uri:    signedLocator,
169         })
170         ExpectStatusCode(c,
171                 "Unauthenticated request, signed locator",
172                 PermissionError.HTTPCode, response)
173
174         // Authenticated request, expired locator
175         // => ExpiredError
176         response = IssueRequest(s.handler, &RequestTester{
177                 method:   "GET",
178                 uri:      expiredLocator,
179                 apiToken: knownToken,
180         })
181         ExpectStatusCode(c,
182                 "Authenticated request, expired locator",
183                 ExpiredError.HTTPCode, response)
184
185         // Authenticated request, signed locator
186         // => 503 Server busy (transient error)
187
188         // Set up the block owning volume to respond with errors
189         vols[0].Volume.(*MockVolume).Bad = true
190         vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
191         response = IssueRequest(s.handler, &RequestTester{
192                 method:   "GET",
193                 uri:      signedLocator,
194                 apiToken: knownToken,
195         })
196         // A transient error from one volume while the other doesn't find the block
197         // should make the service return a 503 so that clients can retry.
198         ExpectStatusCode(c,
199                 "Volume backend busy",
200                 503, response)
201 }
202
// Test PutBlockHandler on the following situations:
//   - no server key
//   - with server key, authenticated request, unsigned locator
//   - with server key, unauthenticated request, unsigned locator
//
func (s *HandlerSuite) TestPutHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// --------------
	// No server key.

	s.cluster.Collections.BlobSigningKey = ""

	// Unauthenticated request, no server key
	// => OK (unsigned response)
	unsignedLocator := "/" + TestHash
	response := IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated request, no server key", http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated request, no server key",
		TestHashPutResp, response)

	// ------------------
	// With a server key.

	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	// When a permission key is available, the locator returned
	// from an authenticated PUT request will be signed.

	// Authenticated PUT, signed locator
	// => OK (signed response)
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
			apiToken:    knownToken,
		})

	ExpectStatusCode(c,
		"Authenticated PUT, signed locator, with server key",
		http.StatusOK, response)
	// The response body is the signed locator; it must verify
	// against the server key and the token used for the request.
	responseLocator := strings.TrimSpace(response.Body.String())
	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
			"response '%s' does not contain a valid signature",
			responseLocator)
	}

	// Unauthenticated PUT, unsigned locator
	// => OK
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		TestHashPutResp, response)
}
277
// TestPutAndDeleteSkipReadonlyVolumes verifies that mutating requests
// are routed only to writable volumes: volume ...000 is made read-only
// before setup, so the PUT and DELETE below should touch only ...111.
func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
	// Override volume 0 (configured writable in SetUpTest) as read-only.
	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	s.cluster.SystemRootToken = "fake-data-manager-token"
	IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
		})

	// DELETE is only permitted with BlobTrash enabled and the root token.
	s.cluster.Collections.BlobTrash = true
	IssueRequest(s.handler,
		&RequestTester{
			method:      "DELETE",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
			apiToken:    s.cluster.SystemRootToken,
		})
	// expect describes how many times a given MockVolume method
	// should have been invoked on a given volume.
	type expect struct {
		volid     string
		method    string
		callcount int
	}
	// The read-only volume must see none of the listed calls; the
	// writable volume handles the PUT and DELETE (the exact mix of
	// Compare/Touch/Put reflects MockVolume's PUT bookkeeping).
	for _, e := range []expect{
		{"zzzzz-nyw5e-000000000000000", "Get", 0},
		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
		{"zzzzz-nyw5e-000000000000000", "Put", 0},
		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
		{"zzzzz-nyw5e-111111111111111", "Get", 0},
		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
		{"zzzzz-nyw5e-111111111111111", "Put", 1},
		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
	} {
		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
		}
	}
}
320
321 // Test TOUCH requests.
322 func (s *HandlerSuite) TestTouchHandler(c *check.C) {
323         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
324         vols := s.handler.volmgr.AllWritable()
325         vols[0].Put(context.Background(), TestHash, TestBlock)
326         vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
327         afterPut := time.Now()
328         t, err := vols[0].Mtime(TestHash)
329         c.Assert(err, check.IsNil)
330         c.Assert(t.Before(afterPut), check.Equals, true)
331
332         ExpectStatusCode(c,
333                 "touch with no credentials",
334                 http.StatusUnauthorized,
335                 IssueRequest(s.handler, &RequestTester{
336                         method: "TOUCH",
337                         uri:    "/" + TestHash,
338                 }))
339
340         ExpectStatusCode(c,
341                 "touch with non-root credentials",
342                 http.StatusUnauthorized,
343                 IssueRequest(s.handler, &RequestTester{
344                         method:   "TOUCH",
345                         uri:      "/" + TestHash,
346                         apiToken: arvadostest.ActiveTokenV2,
347                 }))
348
349         ExpectStatusCode(c,
350                 "touch non-existent block",
351                 http.StatusNotFound,
352                 IssueRequest(s.handler, &RequestTester{
353                         method:   "TOUCH",
354                         uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
355                         apiToken: s.cluster.SystemRootToken,
356                 }))
357
358         beforeTouch := time.Now()
359         ExpectStatusCode(c,
360                 "touch block",
361                 http.StatusOK,
362                 IssueRequest(s.handler, &RequestTester{
363                         method:   "TOUCH",
364                         uri:      "/" + TestHash,
365                         apiToken: s.cluster.SystemRootToken,
366                 }))
367         t, err = vols[0].Mtime(TestHash)
368         c.Assert(err, check.IsNil)
369         c.Assert(t.After(beforeTouch), check.Equals, true)
370 }
371
372 // Test /index requests:
373 //   - unauthenticated /index request
374 //   - unauthenticated /index/prefix request
375 //   - authenticated   /index request        | non-superuser
376 //   - authenticated   /index/prefix request | non-superuser
377 //   - authenticated   /index request        | superuser
378 //   - authenticated   /index/prefix request | superuser
379 //
380 // The only /index requests that should succeed are those issued by the
381 // superuser. They should pass regardless of the value of BlobSigning.
382 //
383 func (s *HandlerSuite) TestIndexHandler(c *check.C) {
384         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
385
386         // Include multiple blocks on different volumes, and
387         // some metadata files (which should be omitted from index listings)
388         vols := s.handler.volmgr.AllWritable()
389         vols[0].Put(context.Background(), TestHash, TestBlock)
390         vols[1].Put(context.Background(), TestHash2, TestBlock2)
391         vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
392         vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
393
394         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
395
396         unauthenticatedReq := &RequestTester{
397                 method: "GET",
398                 uri:    "/index",
399         }
400         authenticatedReq := &RequestTester{
401                 method:   "GET",
402                 uri:      "/index",
403                 apiToken: knownToken,
404         }
405         superuserReq := &RequestTester{
406                 method:   "GET",
407                 uri:      "/index",
408                 apiToken: s.cluster.SystemRootToken,
409         }
410         unauthPrefixReq := &RequestTester{
411                 method: "GET",
412                 uri:    "/index/" + TestHash[0:3],
413         }
414         authPrefixReq := &RequestTester{
415                 method:   "GET",
416                 uri:      "/index/" + TestHash[0:3],
417                 apiToken: knownToken,
418         }
419         superuserPrefixReq := &RequestTester{
420                 method:   "GET",
421                 uri:      "/index/" + TestHash[0:3],
422                 apiToken: s.cluster.SystemRootToken,
423         }
424         superuserNoSuchPrefixReq := &RequestTester{
425                 method:   "GET",
426                 uri:      "/index/abcd",
427                 apiToken: s.cluster.SystemRootToken,
428         }
429         superuserInvalidPrefixReq := &RequestTester{
430                 method:   "GET",
431                 uri:      "/index/xyz",
432                 apiToken: s.cluster.SystemRootToken,
433         }
434
435         // -------------------------------------------------------------
436         // Only the superuser should be allowed to issue /index requests.
437
438         // ---------------------------
439         // BlobSigning enabled
440         // This setting should not affect tests passing.
441         s.cluster.Collections.BlobSigning = true
442
443         // unauthenticated /index request
444         // => UnauthorizedError
445         response := IssueRequest(s.handler, unauthenticatedReq)
446         ExpectStatusCode(c,
447                 "permissions on, unauthenticated request",
448                 UnauthorizedError.HTTPCode,
449                 response)
450
451         // unauthenticated /index/prefix request
452         // => UnauthorizedError
453         response = IssueRequest(s.handler, unauthPrefixReq)
454         ExpectStatusCode(c,
455                 "permissions on, unauthenticated /index/prefix request",
456                 UnauthorizedError.HTTPCode,
457                 response)
458
459         // authenticated /index request, non-superuser
460         // => UnauthorizedError
461         response = IssueRequest(s.handler, authenticatedReq)
462         ExpectStatusCode(c,
463                 "permissions on, authenticated request, non-superuser",
464                 UnauthorizedError.HTTPCode,
465                 response)
466
467         // authenticated /index/prefix request, non-superuser
468         // => UnauthorizedError
469         response = IssueRequest(s.handler, authPrefixReq)
470         ExpectStatusCode(c,
471                 "permissions on, authenticated /index/prefix request, non-superuser",
472                 UnauthorizedError.HTTPCode,
473                 response)
474
475         // superuser /index request
476         // => OK
477         response = IssueRequest(s.handler, superuserReq)
478         ExpectStatusCode(c,
479                 "permissions on, superuser request",
480                 http.StatusOK,
481                 response)
482
483         // ----------------------------
484         // BlobSigning disabled
485         // Valid Request should still pass.
486         s.cluster.Collections.BlobSigning = false
487
488         // superuser /index request
489         // => OK
490         response = IssueRequest(s.handler, superuserReq)
491         ExpectStatusCode(c,
492                 "permissions on, superuser request",
493                 http.StatusOK,
494                 response)
495
496         expected := `^` + TestHash + `\+\d+ \d+\n` +
497                 TestHash2 + `\+\d+ \d+\n\n$`
498         match, _ := regexp.MatchString(expected, response.Body.String())
499         if !match {
500                 c.Errorf(
501                         "permissions on, superuser request: expected %s, got:\n%s",
502                         expected, response.Body.String())
503         }
504
505         // superuser /index/prefix request
506         // => OK
507         response = IssueRequest(s.handler, superuserPrefixReq)
508         ExpectStatusCode(c,
509                 "permissions on, superuser request",
510                 http.StatusOK,
511                 response)
512
513         expected = `^` + TestHash + `\+\d+ \d+\n\n$`
514         match, _ = regexp.MatchString(expected, response.Body.String())
515         if !match {
516                 c.Errorf(
517                         "permissions on, superuser /index/prefix request: expected %s, got:\n%s",
518                         expected, response.Body.String())
519         }
520
521         // superuser /index/{no-such-prefix} request
522         // => OK
523         response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
524         ExpectStatusCode(c,
525                 "permissions on, superuser request",
526                 http.StatusOK,
527                 response)
528
529         if "\n" != response.Body.String() {
530                 c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
531         }
532
533         // superuser /index/{invalid-prefix} request
534         // => StatusBadRequest
535         response = IssueRequest(s.handler, superuserInvalidPrefixReq)
536         ExpectStatusCode(c,
537                 "permissions on, superuser request",
538                 http.StatusBadRequest,
539                 response)
540 }
541
542 // TestDeleteHandler
543 //
544 // Cases tested:
545 //
546 //   With no token and with a non-data-manager token:
547 //   * Delete existing block
548 //     (test for 403 Forbidden, confirm block not deleted)
549 //
550 //   With data manager token:
551 //
552 //   * Delete existing block
553 //     (test for 200 OK, response counts, confirm block deleted)
554 //
555 //   * Delete nonexistent block
556 //     (test for 200 OK, response counts)
557 //
558 //   TODO(twp):
559 //
560 //   * Delete block on read-only and read-write volume
561 //     (test for 200 OK, response with copies_deleted=1,
562 //     copies_failed=1, confirm block deleted only on r/w volume)
563 //
564 //   * Delete block on read-only volume only
565 //     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
566 //     confirm block not deleted)
567 //
568 func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
569         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
570
571         vols := s.handler.volmgr.AllWritable()
572         vols[0].Put(context.Background(), TestHash, TestBlock)
573
574         // Explicitly set the BlobSigningTTL to 0 for these
575         // tests, to ensure the MockVolume deletes the blocks
576         // even though they have just been created.
577         s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
578
579         var userToken = "NOT DATA MANAGER TOKEN"
580         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
581
582         s.cluster.Collections.BlobTrash = true
583
584         unauthReq := &RequestTester{
585                 method: "DELETE",
586                 uri:    "/" + TestHash,
587         }
588
589         userReq := &RequestTester{
590                 method:   "DELETE",
591                 uri:      "/" + TestHash,
592                 apiToken: userToken,
593         }
594
595         superuserExistingBlockReq := &RequestTester{
596                 method:   "DELETE",
597                 uri:      "/" + TestHash,
598                 apiToken: s.cluster.SystemRootToken,
599         }
600
601         superuserNonexistentBlockReq := &RequestTester{
602                 method:   "DELETE",
603                 uri:      "/" + TestHash2,
604                 apiToken: s.cluster.SystemRootToken,
605         }
606
607         // Unauthenticated request returns PermissionError.
608         var response *httptest.ResponseRecorder
609         response = IssueRequest(s.handler, unauthReq)
610         ExpectStatusCode(c,
611                 "unauthenticated request",
612                 PermissionError.HTTPCode,
613                 response)
614
615         // Authenticated non-admin request returns PermissionError.
616         response = IssueRequest(s.handler, userReq)
617         ExpectStatusCode(c,
618                 "authenticated non-admin request",
619                 PermissionError.HTTPCode,
620                 response)
621
622         // Authenticated admin request for nonexistent block.
623         type deletecounter struct {
624                 Deleted int `json:"copies_deleted"`
625                 Failed  int `json:"copies_failed"`
626         }
627         var responseDc, expectedDc deletecounter
628
629         response = IssueRequest(s.handler, superuserNonexistentBlockReq)
630         ExpectStatusCode(c,
631                 "data manager request, nonexistent block",
632                 http.StatusNotFound,
633                 response)
634
635         // Authenticated admin request for existing block while BlobTrash is false.
636         s.cluster.Collections.BlobTrash = false
637         response = IssueRequest(s.handler, superuserExistingBlockReq)
638         ExpectStatusCode(c,
639                 "authenticated request, existing block, method disabled",
640                 MethodDisabledError.HTTPCode,
641                 response)
642         s.cluster.Collections.BlobTrash = true
643
644         // Authenticated admin request for existing block.
645         response = IssueRequest(s.handler, superuserExistingBlockReq)
646         ExpectStatusCode(c,
647                 "data manager request, existing block",
648                 http.StatusOK,
649                 response)
650         // Expect response {"copies_deleted":1,"copies_failed":0}
651         expectedDc = deletecounter{1, 0}
652         json.NewDecoder(response.Body).Decode(&responseDc)
653         if responseDc != expectedDc {
654                 c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
655                         expectedDc, responseDc)
656         }
657         // Confirm the block has been deleted
658         buf := make([]byte, BlockSize)
659         _, err := vols[0].Get(context.Background(), TestHash, buf)
660         var blockDeleted = os.IsNotExist(err)
661         if !blockDeleted {
662                 c.Error("superuserExistingBlockReq: block not deleted")
663         }
664
665         // A DELETE request on a block newer than BlobSigningTTL
666         // should return success but leave the block on the volume.
667         vols[0].Put(context.Background(), TestHash, TestBlock)
668         s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
669
670         response = IssueRequest(s.handler, superuserExistingBlockReq)
671         ExpectStatusCode(c,
672                 "data manager request, existing block",
673                 http.StatusOK,
674                 response)
675         // Expect response {"copies_deleted":1,"copies_failed":0}
676         expectedDc = deletecounter{1, 0}
677         json.NewDecoder(response.Body).Decode(&responseDc)
678         if responseDc != expectedDc {
679                 c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
680                         expectedDc, responseDc)
681         }
682         // Confirm the block has NOT been deleted.
683         _, err = vols[0].Get(context.Background(), TestHash, buf)
684         if err != nil {
685                 c.Errorf("testing delete on new block: %s\n", err)
686         }
687 }
688
689 // TestPullHandler
690 //
691 // Test handling of the PUT /pull statement.
692 //
693 // Cases tested: syntactically valid and invalid pull lists, from the
694 // data manager and from unprivileged users:
695 //
696 //   1. Valid pull list from an ordinary user
697 //      (expected result: 401 Unauthorized)
698 //
699 //   2. Invalid pull request from an ordinary user
700 //      (expected result: 401 Unauthorized)
701 //
702 //   3. Valid pull request from the data manager
703 //      (expected result: 200 OK with request body "Received 3 pull
704 //      requests"
705 //
706 //   4. Invalid pull request from the data manager
707 //      (expected result: 400 Bad Request)
708 //
709 // Test that in the end, the pull manager received a good pull list with
710 // the expected number of requests.
711 //
712 // TODO(twp): test concurrency: launch 100 goroutines to update the
713 // pull list simultaneously.  Make sure that none of them return 400
714 // Bad Request and that pullq.GetList() returns a valid list.
715 //
716 func (s *HandlerSuite) TestPullHandler(c *check.C) {
717         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
718
719         // Replace the router's pullq -- which the worker goroutines
720         // started by setup() are now receiving from -- with a new
721         // one, so we can see what the handler sends to it.
722         pullq := NewWorkQueue()
723         s.handler.Handler.(*router).pullq = pullq
724
725         var userToken = "USER TOKEN"
726         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
727
728         goodJSON := []byte(`[
729                 {
730                         "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
731                         "servers":[
732                                 "http://server1",
733                                 "http://server2"
734                         ]
735                 },
736                 {
737                         "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
738                         "servers":[]
739                 },
740                 {
741                         "locator":"cccccccccccccccccccccccccccccccc+12345",
742                         "servers":["http://server1"]
743                 }
744         ]`)
745
746         badJSON := []byte(`{ "key":"I'm a little teapot" }`)
747
748         type pullTest struct {
749                 name         string
750                 req          RequestTester
751                 responseCode int
752                 responseBody string
753         }
754         var testcases = []pullTest{
755                 {
756                         "Valid pull list from an ordinary user",
757                         RequestTester{"/pull", userToken, "PUT", goodJSON},
758                         http.StatusUnauthorized,
759                         "Unauthorized\n",
760                 },
761                 {
762                         "Invalid pull request from an ordinary user",
763                         RequestTester{"/pull", userToken, "PUT", badJSON},
764                         http.StatusUnauthorized,
765                         "Unauthorized\n",
766                 },
767                 {
768                         "Valid pull request from the data manager",
769                         RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON},
770                         http.StatusOK,
771                         "Received 3 pull requests\n",
772                 },
773                 {
774                         "Invalid pull request from the data manager",
775                         RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON},
776                         http.StatusBadRequest,
777                         "",
778                 },
779         }
780
781         for _, tst := range testcases {
782                 response := IssueRequest(s.handler, &tst.req)
783                 ExpectStatusCode(c, tst.name, tst.responseCode, response)
784                 ExpectBody(c, tst.name, tst.responseBody, response)
785         }
786
787         // The Keep pull manager should have received one good list with 3
788         // requests on it.
789         for i := 0; i < 3; i++ {
790                 var item interface{}
791                 select {
792                 case item = <-pullq.NextItem:
793                 case <-time.After(time.Second):
794                         c.Error("timed out")
795                 }
796                 if _, ok := item.(PullRequest); !ok {
797                         c.Errorf("item %v could not be parsed as a PullRequest", item)
798                 }
799         }
800
801         expectChannelEmpty(c, pullq.NextItem)
802 }
803
804 // TestTrashHandler
805 //
806 // Test cases:
807 //
808 // Cases tested: syntactically valid and invalid trash lists, from the
809 // data manager and from unprivileged users:
810 //
811 //   1. Valid trash list from an ordinary user
812 //      (expected result: 401 Unauthorized)
813 //
814 //   2. Invalid trash list from an ordinary user
815 //      (expected result: 401 Unauthorized)
816 //
817 //   3. Valid trash list from the data manager
//      (expected result: 200 OK with request body "Received 3 trash
//      requests")
820 //
821 //   4. Invalid trash list from the data manager
822 //      (expected result: 400 Bad Request)
823 //
// Test that in the end, the trash collector received a good trash
// list with the expected number of requests.
826 //
827 // TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously.  Make sure that none of them return 400
829 // Bad Request and that replica.Dump() returns a valid list.
830 //
831 func (s *HandlerSuite) TestTrashHandler(c *check.C) {
832         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
833         // Replace the router's trashq -- which the worker goroutines
834         // started by setup() are now receiving from -- with a new
835         // one, so we can see what the handler sends to it.
836         trashq := NewWorkQueue()
837         s.handler.Handler.(*router).trashq = trashq
838
839         var userToken = "USER TOKEN"
840         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
841
842         goodJSON := []byte(`[
843                 {
844                         "locator":"block1",
845                         "block_mtime":1409082153
846                 },
847                 {
848                         "locator":"block2",
849                         "block_mtime":1409082153
850                 },
851                 {
852                         "locator":"block3",
853                         "block_mtime":1409082153
854                 }
855         ]`)
856
857         badJSON := []byte(`I am not a valid JSON string`)
858
859         type trashTest struct {
860                 name         string
861                 req          RequestTester
862                 responseCode int
863                 responseBody string
864         }
865
866         var testcases = []trashTest{
867                 {
868                         "Valid trash list from an ordinary user",
869                         RequestTester{"/trash", userToken, "PUT", goodJSON},
870                         http.StatusUnauthorized,
871                         "Unauthorized\n",
872                 },
873                 {
874                         "Invalid trash list from an ordinary user",
875                         RequestTester{"/trash", userToken, "PUT", badJSON},
876                         http.StatusUnauthorized,
877                         "Unauthorized\n",
878                 },
879                 {
880                         "Valid trash list from the data manager",
881                         RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON},
882                         http.StatusOK,
883                         "Received 3 trash requests\n",
884                 },
885                 {
886                         "Invalid trash list from the data manager",
887                         RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON},
888                         http.StatusBadRequest,
889                         "",
890                 },
891         }
892
893         for _, tst := range testcases {
894                 response := IssueRequest(s.handler, &tst.req)
895                 ExpectStatusCode(c, tst.name, tst.responseCode, response)
896                 ExpectBody(c, tst.name, tst.responseBody, response)
897         }
898
899         // The trash collector should have received one good list with 3
900         // requests on it.
901         for i := 0; i < 3; i++ {
902                 item := <-trashq.NextItem
903                 if _, ok := item.(TrashRequest); !ok {
904                         c.Errorf("item %v could not be parsed as a TrashRequest", item)
905                 }
906         }
907
908         expectChannelEmpty(c, trashq.NextItem)
909 }
910
911 // ====================
912 // Helper functions
913 // ====================
914
// IssueRequest executes an HTTP request described by rt, to a
// REST router.  It returns the HTTP response to the request.
// The API token (if any) is sent with the "OAuth2" Authorization
// scheme.  The error from http.NewRequest is ignored: rt values in
// these tests always have a valid method and URI.
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
	}
	handler.ServeHTTP(response, req)
	return response
}
927
928 func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
929         response := httptest.NewRecorder()
930         body := bytes.NewReader(rt.requestBody)
931         req, _ := http.NewRequest(rt.method, rt.uri, body)
932         if rt.apiToken != "" {
933                 req.Header.Set("Authorization", "Bearer "+rt.apiToken)
934         }
935         handler.ServeHTTP(response, req)
936         return response
937 }
938
939 // ExpectStatusCode checks whether a response has the specified status code,
940 // and reports a test failure if not.
941 func ExpectStatusCode(
942         c *check.C,
943         testname string,
944         expectedStatus int,
945         response *httptest.ResponseRecorder) {
946         c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
947 }
948
949 func ExpectBody(
950         c *check.C,
951         testname string,
952         expectedBody string,
953         response *httptest.ResponseRecorder) {
954         if expectedBody != "" && response.Body.String() != expectedBody {
955                 c.Errorf("%s: expected response body '%s', got %+v",
956                         testname, expectedBody, response)
957         }
958 }
959
960 // See #7121
961 func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
962         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
963
964         defer func(orig *bufferPool) {
965                 bufs = orig
966         }(bufs)
967         bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
968
969         ok := make(chan struct{})
970         go func() {
971                 for i := 0; i < 2; i++ {
972                         response := IssueRequest(s.handler,
973                                 &RequestTester{
974                                         method:      "PUT",
975                                         uri:         "/" + TestHash,
976                                         requestBody: TestBlock,
977                                 })
978                         ExpectStatusCode(c,
979                                 "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
980                 }
981                 ok <- struct{}{}
982         }()
983
984         select {
985         case <-ok:
986         case <-time.After(time.Second):
987                 c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
988         }
989 }
990
991 // Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
992 // leak.
993 func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
994         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
995
996         ok := make(chan bool)
997         go func() {
998                 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
999                         // Unauthenticated request, no server key
1000                         // => OK (unsigned response)
1001                         unsignedLocator := "/" + TestHash
1002                         response := IssueRequest(s.handler,
1003                                 &RequestTester{
1004                                         method:      "PUT",
1005                                         uri:         unsignedLocator,
1006                                         requestBody: TestBlock,
1007                                 })
1008                         ExpectStatusCode(c,
1009                                 "TestPutHandlerBufferleak", http.StatusOK, response)
1010                         ExpectBody(c,
1011                                 "TestPutHandlerBufferleak",
1012                                 TestHashPutResp, response)
1013                 }
1014                 ok <- true
1015         }()
1016         select {
1017         case <-time.After(20 * time.Second):
1018                 // If the buffer pool leaks, the test goroutine hangs.
1019                 c.Fatal("test did not finish, assuming pool leaked")
1020         case <-ok:
1021         }
1022 }
1023
// notifyingResponseRecorder wraps httptest.ResponseRecorder with a
// CloseNotify channel, so tests can simulate a client that
// disconnects mid-request.
type notifyingResponseRecorder struct {
	*httptest.ResponseRecorder
	closer chan bool
}

// CloseNotify returns the channel used to signal (simulated) client
// disconnection, satisfying http.CloseNotifier.
func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
	return r.closer
}
1032
// TestGetHandlerClientDisconnect verifies that when the client has
// disconnected (per CloseNotify) before a GET can obtain a buffer,
// the handler responds 503 and never reads the block from a volume.
func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
	s.cluster.Collections.BlobSigning = false
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Swap in a single-buffer pool, and check its only buffer out
	// for the duration of the test (the deferred Put of an
	// immediate Get), so the GET below must wait for a buffer and
	// therefore has a chance to notice the disconnect first.
	defer func(orig *bufferPool) {
		bufs = orig
	}(bufs)
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
	defer bufs.Put(bufs.Get(BlockSize))

	if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
		c.Error(err)
	}

	resp := &notifyingResponseRecorder{
		ResponseRecorder: httptest.NewRecorder(),
		closer:           make(chan bool, 1),
	}
	if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
		c.Fatal("notifyingResponseRecorder is broken")
	}
	// If anyone asks, the client has disconnected.
	resp.closer <- true

	ok := make(chan struct{})
	go func() {
		req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
		s.handler.ServeHTTP(resp, req)
		ok <- struct{}{}
	}()

	select {
	case <-time.After(20 * time.Second):
		c.Fatal("request took >20s, close notifier must be broken")
	case <-ok:
	}

	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
	// The handler should have given up before asking any volume
	// for the block.
	for i, v := range s.handler.volmgr.AllWritable() {
		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
			c.Errorf("volume %d got %d calls, expected 0", i, calls)
		}
	}
}
1077
1078 // Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
1079 // leak.
1080 func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
1081         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1082
1083         vols := s.handler.volmgr.AllWritable()
1084         if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
1085                 c.Error(err)
1086         }
1087
1088         ok := make(chan bool)
1089         go func() {
1090                 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
1091                         // Unauthenticated request, unsigned locator
1092                         // => OK
1093                         unsignedLocator := "/" + TestHash
1094                         response := IssueRequest(s.handler,
1095                                 &RequestTester{
1096                                         method: "GET",
1097                                         uri:    unsignedLocator,
1098                                 })
1099                         ExpectStatusCode(c,
1100                                 "Unauthenticated request, unsigned locator", http.StatusOK, response)
1101                         ExpectBody(c,
1102                                 "Unauthenticated request, unsigned locator",
1103                                 string(TestBlock),
1104                                 response)
1105                 }
1106                 ok <- true
1107         }()
1108         select {
1109         case <-time.After(20 * time.Second):
1110                 // If the buffer pool leaks, the test goroutine hangs.
1111                 c.Fatal("test did not finish, assuming pool leaked")
1112         case <-ok:
1113         }
1114 }
1115
1116 func (s *HandlerSuite) TestPutReplicationHeader(c *check.C) {
1117         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1118
1119         resp := IssueRequest(s.handler, &RequestTester{
1120                 method:      "PUT",
1121                 uri:         "/" + TestHash,
1122                 requestBody: TestBlock,
1123         })
1124         if r := resp.Header().Get("X-Keep-Replicas-Stored"); r != "1" {
1125                 c.Logf("%#v", resp)
1126                 c.Errorf("Got X-Keep-Replicas-Stored: %q, expected %q", r, "1")
1127         }
1128 }
1129
// TestUntrashHandler verifies permission enforcement, locator
// validation, and method checking for the untrash endpoint
// (PUT /untrash/{hash}), then confirms a valid request from the
// system root token succeeds on all (mock) volumes.
func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Set up Keep volumes
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	// unauthenticatedReq => UnauthorizedError
	unauthenticatedReq := &RequestTester{
		method: "PUT",
		uri:    "/untrash/" + TestHash,
	}
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"Unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// notDataManagerReq => UnauthorizedError
	notDataManagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: knownToken,
	}

	response = IssueRequest(s.handler, notDataManagerReq)
	ExpectStatusCode(c,
		"Non-datamanager token",
		UnauthorizedError.HTTPCode,
		response)

	// datamanagerWithBadHashReq => StatusBadRequest
	datamanagerWithBadHashReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/thisisnotalocator",
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
	ExpectStatusCode(c,
		"Bad locator in untrash request",
		http.StatusBadRequest,
		response)

	// datamanagerWrongMethodReq => StatusMethodNotAllowed
	datamanagerWrongMethodReq := &RequestTester{
		method:   "GET",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
	ExpectStatusCode(c,
		"Only PUT method is supported for untrash",
		http.StatusMethodNotAllowed,
		response)

	// datamanagerReq => StatusOK
	datamanagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerReq)
	ExpectStatusCode(c,
		"",
		http.StatusOK,
		response)
	// The mock setup configures two volumes; both should report
	// successful untrash.
	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
}
1200
1201 func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
1202         // Change all volumes to read-only
1203         for uuid, v := range s.cluster.Volumes {
1204                 v.ReadOnly = true
1205                 s.cluster.Volumes[uuid] = v
1206         }
1207         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1208
1209         // datamanagerReq => StatusOK
1210         datamanagerReq := &RequestTester{
1211                 method:   "PUT",
1212                 uri:      "/untrash/" + TestHash,
1213                 apiToken: s.cluster.SystemRootToken,
1214         }
1215         response := IssueRequest(s.handler, datamanagerReq)
1216         ExpectStatusCode(c,
1217                 "No writable volumes",
1218                 http.StatusNotFound,
1219                 response)
1220 }
1221
1222 func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
1223         s.cluster.ManagementToken = arvadostest.ManagementToken
1224         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1225         pingReq := &RequestTester{
1226                 method:   "GET",
1227                 uri:      "/_health/ping",
1228                 apiToken: arvadostest.ManagementToken,
1229         }
1230         response := IssueHealthCheckRequest(s.handler, pingReq)
1231         ExpectStatusCode(c,
1232                 "",
1233                 http.StatusOK,
1234                 response)
1235         want := `{"health":"OK"}`
1236         if !strings.Contains(response.Body.String(), want) {
1237                 c.Errorf("expected response to include %s: got %s", want, response.Body.String())
1238         }
1239 }