18013: Merge branch 'main' into 18013-token-rake-task-fix
[arvados.git] / services / keepstore / handler_test.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 // Tests for Keep HTTP handlers:
6 //
7 //     GetBlockHandler
8 //     PutBlockHandler
9 //     IndexHandler
10 //
11 // The HTTP handlers are responsible for enforcing permission policy,
12 // so these tests must exercise all possible permission permutations.
13
14 package main
15
16 import (
17         "bytes"
18         "context"
19         "encoding/json"
20         "fmt"
21         "net/http"
22         "net/http/httptest"
23         "os"
24         "sort"
25         "strings"
26         "time"
27
28         "git.arvados.org/arvados.git/lib/config"
29         "git.arvados.org/arvados.git/sdk/go/arvados"
30         "git.arvados.org/arvados.git/sdk/go/arvadostest"
31         "git.arvados.org/arvados.git/sdk/go/ctxlog"
32         "github.com/prometheus/client_golang/prometheus"
33         check "gopkg.in/check.v1"
34 )
35
36 var testServiceURL = func() arvados.URL {
37         return arvados.URL{Host: "localhost:12345", Scheme: "http"}
38 }()
39
40 func testCluster(t TB) *arvados.Cluster {
41         cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
42         if err != nil {
43                 t.Fatal(err)
44         }
45         cluster, err := cfg.GetCluster("")
46         if err != nil {
47                 t.Fatal(err)
48         }
49         cluster.SystemRootToken = arvadostest.SystemRootToken
50         cluster.ManagementToken = arvadostest.ManagementToken
51         cluster.Collections.BlobSigning = false
52         return cluster
53 }
54
// Register HandlerSuite with gocheck so its Test* methods run under
// "go test".
var _ = check.Suite(&HandlerSuite{})

// HandlerSuite exercises the keepstore HTTP handlers against a
// two-volume mock cluster; both fields are rebuilt by SetUpTest
// before every test.
type HandlerSuite struct {
	cluster *arvados.Cluster
	handler *handler
}
61
62 func (s *HandlerSuite) SetUpTest(c *check.C) {
63         s.cluster = testCluster(c)
64         s.cluster.Volumes = map[string]arvados.Volume{
65                 "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
66                 "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
67         }
68         s.handler = &handler{}
69 }
70
// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
type RequestTester struct {
	uri            string // request path, e.g. "/"+TestHash or "/index"
	apiToken       string // Arvados API token; empty means unauthenticated
	method         string // HTTP method: "GET", "PUT", "DELETE", "TOUCH", ...
	requestBody    []byte // request body (block data for PUT)
	storageClasses string // value for the X-Keep-Storage-Classes header
}
80
81 // Test GetBlockHandler on the following situations:
82 //   - permissions off, unauthenticated request, unsigned locator
83 //   - permissions on, authenticated request, signed locator
84 //   - permissions on, authenticated request, unsigned locator
85 //   - permissions on, unauthenticated request, signed locator
86 //   - permissions on, authenticated request, expired locator
87 //   - permissions on, authenticated request, signed locator, transient error from backend
88 //
89 func (s *HandlerSuite) TestGetHandler(c *check.C) {
90         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
91
92         vols := s.handler.volmgr.AllWritable()
93         err := vols[0].Put(context.Background(), TestHash, TestBlock)
94         c.Check(err, check.IsNil)
95
96         // Create locators for testing.
97         // Turn on permission settings so we can generate signed locators.
98         s.cluster.Collections.BlobSigning = true
99         s.cluster.Collections.BlobSigningKey = knownKey
100         s.cluster.Collections.BlobSigningTTL.Set("5m")
101
102         var (
103                 unsignedLocator  = "/" + TestHash
104                 validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
105                 expiredTimestamp = time.Now().Add(-time.Hour)
106                 signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
107                 expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
108         )
109
110         // -----------------
111         // Test unauthenticated request with permissions off.
112         s.cluster.Collections.BlobSigning = false
113
114         // Unauthenticated request, unsigned locator
115         // => OK
116         response := IssueRequest(s.handler,
117                 &RequestTester{
118                         method: "GET",
119                         uri:    unsignedLocator,
120                 })
121         ExpectStatusCode(c,
122                 "Unauthenticated request, unsigned locator", http.StatusOK, response)
123         ExpectBody(c,
124                 "Unauthenticated request, unsigned locator",
125                 string(TestBlock),
126                 response)
127
128         receivedLen := response.Header().Get("Content-Length")
129         expectedLen := fmt.Sprintf("%d", len(TestBlock))
130         if receivedLen != expectedLen {
131                 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
132         }
133
134         // ----------------
135         // Permissions: on.
136         s.cluster.Collections.BlobSigning = true
137
138         // Authenticated request, signed locator
139         // => OK
140         response = IssueRequest(s.handler, &RequestTester{
141                 method:   "GET",
142                 uri:      signedLocator,
143                 apiToken: knownToken,
144         })
145         ExpectStatusCode(c,
146                 "Authenticated request, signed locator", http.StatusOK, response)
147         ExpectBody(c,
148                 "Authenticated request, signed locator", string(TestBlock), response)
149
150         receivedLen = response.Header().Get("Content-Length")
151         expectedLen = fmt.Sprintf("%d", len(TestBlock))
152         if receivedLen != expectedLen {
153                 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
154         }
155
156         // Authenticated request, unsigned locator
157         // => PermissionError
158         response = IssueRequest(s.handler, &RequestTester{
159                 method:   "GET",
160                 uri:      unsignedLocator,
161                 apiToken: knownToken,
162         })
163         ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
164
165         // Unauthenticated request, signed locator
166         // => PermissionError
167         response = IssueRequest(s.handler, &RequestTester{
168                 method: "GET",
169                 uri:    signedLocator,
170         })
171         ExpectStatusCode(c,
172                 "Unauthenticated request, signed locator",
173                 PermissionError.HTTPCode, response)
174
175         // Authenticated request, expired locator
176         // => ExpiredError
177         response = IssueRequest(s.handler, &RequestTester{
178                 method:   "GET",
179                 uri:      expiredLocator,
180                 apiToken: knownToken,
181         })
182         ExpectStatusCode(c,
183                 "Authenticated request, expired locator",
184                 ExpiredError.HTTPCode, response)
185
186         // Authenticated request, signed locator
187         // => 503 Server busy (transient error)
188
189         // Set up the block owning volume to respond with errors
190         vols[0].Volume.(*MockVolume).Bad = true
191         vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
192         response = IssueRequest(s.handler, &RequestTester{
193                 method:   "GET",
194                 uri:      signedLocator,
195                 apiToken: knownToken,
196         })
197         // A transient error from one volume while the other doesn't find the block
198         // should make the service return a 503 so that clients can retry.
199         ExpectStatusCode(c,
200                 "Volume backend busy",
201                 503, response)
202 }
203
// Test PutBlockHandler on the following situations:
//   - no server key
//   - with server key, authenticated request, unsigned locator
//   - with server key, unauthenticated request, unsigned locator
//
func (s *HandlerSuite) TestPutHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// --------------
	// No server key.

	s.cluster.Collections.BlobSigningKey = ""

	// Unauthenticated request, no server key
	// => OK (unsigned response)
	unsignedLocator := "/" + TestHash
	response := IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated request, no server key", http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated request, no server key",
		TestHashPutResp, response)

	// ------------------
	// With a server key.

	s.cluster.Collections.BlobSigningKey = knownKey
	s.cluster.Collections.BlobSigningTTL.Set("5m")

	// When a permission key is available, the locator returned
	// from an authenticated PUT request will be signed.

	// Authenticated PUT, signed locator
	// => OK (signed response)
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
			apiToken:    knownToken,
		})

	ExpectStatusCode(c,
		"Authenticated PUT, signed locator, with server key",
		http.StatusOK, response)
	// The response body is the locator; it must carry a signature
	// valid for the token that issued the PUT.
	responseLocator := strings.TrimSpace(response.Body.String())
	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
			"response '%s' does not contain a valid signature",
			responseLocator)
	}

	// Unauthenticated PUT, unsigned locator
	// => OK
	// Even with a server key configured, an unauthenticated PUT is
	// accepted; the returned locator is simply unsigned.
	response = IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         unsignedLocator,
			requestBody: TestBlock,
		})

	ExpectStatusCode(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		http.StatusOK, response)
	ExpectBody(c,
		"Unauthenticated PUT, unsigned locator, with server key",
		TestHashPutResp, response)
}
278
// TestPutAndDeleteSkipReadonlyVolumes verifies that PUT and DELETE
// operations are routed only to writable volumes: after marking
// volume 000...000 read-only, all mutating calls must land on volume
// 111...111 and the read-only volume must see no calls at all.
func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	s.cluster.SystemRootToken = "fake-data-manager-token"
	IssueRequest(s.handler,
		&RequestTester{
			method:      "PUT",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
		})

	// DELETE requires BlobTrash enabled and the system root token.
	s.cluster.Collections.BlobTrash = true
	IssueRequest(s.handler,
		&RequestTester{
			method:      "DELETE",
			uri:         "/" + TestHash,
			requestBody: TestBlock,
			apiToken:    s.cluster.SystemRootToken,
		})
	// Expected per-volume call counts recorded by the MockVolumes.
	type expect struct {
		volid     string
		method    string
		callcount int
	}
	for _, e := range []expect{
		{"zzzzz-nyw5e-000000000000000", "Get", 0},
		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
		{"zzzzz-nyw5e-000000000000000", "Put", 0},
		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
		{"zzzzz-nyw5e-111111111111111", "Get", 0},
		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
		{"zzzzz-nyw5e-111111111111111", "Put", 1},
		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
	} {
		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
		}
	}
}
321
// TestReadsOrderedByStorageClassPriority verifies that GET requests
// try volumes in descending order of their storage classes' configured
// Priority (ties broken lexicographically by volume UUID), by counting
// Get() calls on each MockVolume after writing the block to vol1 only.
func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-111111111111111": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class1": true}},
		"zzzzz-nyw5e-222222222222222": {
			Driver:         "mock",
			Replication:    1,
			StorageClasses: map[string]bool{"class2": true, "class3": true}},
	}

	for _, trial := range []struct {
		priority1 int // priority of class1, thus vol1
		priority2 int // priority of class2
		priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
		get1      int // expected number of "get" ops on vol1
		get2      int // expected number of "get" ops on vol2
	}{
		{100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
		{100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
		{66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
		{66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
	} {
		c.Logf("%+v", trial)
		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
			"class1": {Priority: trial.priority1},
			"class2": {Priority: trial.priority2},
			"class3": {Priority: trial.priority3},
		}
		// Re-run setup each trial so the new priorities take effect
		// (also resets the MockVolume call counters).
		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
		IssueRequest(s.handler,
			&RequestTester{
				method:         "PUT",
				uri:            "/" + TestHash,
				requestBody:    TestBlock,
				storageClasses: "class1",
			})
		IssueRequest(s.handler,
			&RequestTester{
				method: "GET",
				uri:    "/" + TestHash,
			})
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
	}
}
369
370 // Test TOUCH requests.
371 func (s *HandlerSuite) TestTouchHandler(c *check.C) {
372         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
373         vols := s.handler.volmgr.AllWritable()
374         vols[0].Put(context.Background(), TestHash, TestBlock)
375         vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
376         afterPut := time.Now()
377         t, err := vols[0].Mtime(TestHash)
378         c.Assert(err, check.IsNil)
379         c.Assert(t.Before(afterPut), check.Equals, true)
380
381         ExpectStatusCode(c,
382                 "touch with no credentials",
383                 http.StatusUnauthorized,
384                 IssueRequest(s.handler, &RequestTester{
385                         method: "TOUCH",
386                         uri:    "/" + TestHash,
387                 }))
388
389         ExpectStatusCode(c,
390                 "touch with non-root credentials",
391                 http.StatusUnauthorized,
392                 IssueRequest(s.handler, &RequestTester{
393                         method:   "TOUCH",
394                         uri:      "/" + TestHash,
395                         apiToken: arvadostest.ActiveTokenV2,
396                 }))
397
398         ExpectStatusCode(c,
399                 "touch non-existent block",
400                 http.StatusNotFound,
401                 IssueRequest(s.handler, &RequestTester{
402                         method:   "TOUCH",
403                         uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
404                         apiToken: s.cluster.SystemRootToken,
405                 }))
406
407         beforeTouch := time.Now()
408         ExpectStatusCode(c,
409                 "touch block",
410                 http.StatusOK,
411                 IssueRequest(s.handler, &RequestTester{
412                         method:   "TOUCH",
413                         uri:      "/" + TestHash,
414                         apiToken: s.cluster.SystemRootToken,
415                 }))
416         t, err = vols[0].Mtime(TestHash)
417         c.Assert(err, check.IsNil)
418         c.Assert(t.After(beforeTouch), check.Equals, true)
419 }
420
// Test /index requests:
//   - unauthenticated /index request
//   - unauthenticated /index/prefix request
//   - authenticated   /index request        | non-superuser
//   - authenticated   /index/prefix request | non-superuser
//   - authenticated   /index request        | superuser
//   - authenticated   /index/prefix request | superuser
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
//
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Include multiple blocks on different volumes, and
	// some metadata files (which should be omitted from index listings)
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)
	vols[1].Put(context.Background(), TestHash2, TestBlock2)
	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
	vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	// Pre-built request variants used by the cases below.
	unauthenticatedReq := &RequestTester{
		method: "GET",
		uri:    "/index",
	}
	authenticatedReq := &RequestTester{
		method:   "GET",
		uri:      "/index",
		apiToken: knownToken,
	}
	superuserReq := &RequestTester{
		method:   "GET",
		uri:      "/index",
		apiToken: s.cluster.SystemRootToken,
	}
	unauthPrefixReq := &RequestTester{
		method: "GET",
		uri:    "/index/" + TestHash[0:3],
	}
	authPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/" + TestHash[0:3],
		apiToken: knownToken,
	}
	superuserPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/" + TestHash[0:3],
		apiToken: s.cluster.SystemRootToken,
	}
	superuserNoSuchPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/abcd",
		apiToken: s.cluster.SystemRootToken,
	}
	superuserInvalidPrefixReq := &RequestTester{
		method:   "GET",
		uri:      "/index/xyz",
		apiToken: s.cluster.SystemRootToken,
	}

	// -------------------------------------------------------------
	// Only the superuser should be allowed to issue /index requests.

	// ---------------------------
	// BlobSigning enabled
	// This setting should not affect tests passing.
	s.cluster.Collections.BlobSigning = true

	// unauthenticated /index request
	// => UnauthorizedError
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"permissions on, unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// unauthenticated /index/prefix request
	// => UnauthorizedError
	response = IssueRequest(s.handler, unauthPrefixReq)
	ExpectStatusCode(c,
		"permissions on, unauthenticated /index/prefix request",
		UnauthorizedError.HTTPCode,
		response)

	// authenticated /index request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authenticatedReq)
	ExpectStatusCode(c,
		"permissions on, authenticated request, non-superuser",
		UnauthorizedError.HTTPCode,
		response)

	// authenticated /index/prefix request, non-superuser
	// => UnauthorizedError
	response = IssueRequest(s.handler, authPrefixReq)
	ExpectStatusCode(c,
		"permissions on, authenticated /index/prefix request, non-superuser",
		UnauthorizedError.HTTPCode,
		response)

	// superuser /index request
	// => OK
	response = IssueRequest(s.handler, superuserReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	// ----------------------------
	// BlobSigning disabled
	// Valid Request should still pass.
	s.cluster.Collections.BlobSigning = false

	// superuser /index request
	// => OK
	response = IssueRequest(s.handler, superuserReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	// The index must list both data blocks (and neither .meta file),
	// each as "hash+size mtime", terminated by a blank line.
	expected := `^` + TestHash + `\+\d+ \d+\n` +
		TestHash2 + `\+\d+ \d+\n\n$`
	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
		"permissions on, superuser request"))

	// superuser /index/prefix request
	// => OK
	response = IssueRequest(s.handler, superuserPrefixReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	// Only the block matching the prefix should be listed.
	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
		"permissions on, superuser /index/prefix request"))

	// superuser /index/{no-such-prefix} request
	// => OK
	response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusOK,
		response)

	// An unmatched (but valid hex) prefix yields just the terminator.
	if "\n" != response.Body.String() {
		c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
	}

	// superuser /index/{invalid-prefix} request
	// => StatusBadRequest
	response = IssueRequest(s.handler, superuserInvalidPrefixReq)
	ExpectStatusCode(c,
		"permissions on, superuser request",
		http.StatusBadRequest,
		response)
}
582
// TestDeleteHandler
//
// Cases tested:
//
//   With no token and with a non-data-manager token:
//   * Delete existing block
//     (test for 403 Forbidden, confirm block not deleted)
//
//   With data manager token:
//
//   * Delete existing block
//     (test for 200 OK, response counts, confirm block deleted)
//
//   * Delete nonexistent block
//     (test for 200 OK, response counts)
//
//   TODO(twp):
//
//   * Delete block on read-only and read-write volume
//     (test for 200 OK, response with copies_deleted=1,
//     copies_failed=1, confirm block deleted only on r/w volume)
//
//   * Delete block on read-only volume only
//     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
//     confirm block not deleted)
//
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	// Explicitly set the BlobSigningTTL to 0 for these
	// tests, to ensure the MockVolume deletes the blocks
	// even though they have just been created.
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)

	var userToken = "NOT DATA MANAGER TOKEN"
	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	s.cluster.Collections.BlobTrash = true

	unauthReq := &RequestTester{
		method: "DELETE",
		uri:    "/" + TestHash,
	}

	userReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash,
		apiToken: userToken,
	}

	superuserExistingBlockReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}

	superuserNonexistentBlockReq := &RequestTester{
		method:   "DELETE",
		uri:      "/" + TestHash2,
		apiToken: s.cluster.SystemRootToken,
	}

	// Unauthenticated request returns PermissionError.
	var response *httptest.ResponseRecorder
	response = IssueRequest(s.handler, unauthReq)
	ExpectStatusCode(c,
		"unauthenticated request",
		PermissionError.HTTPCode,
		response)

	// Authenticated non-admin request returns PermissionError.
	response = IssueRequest(s.handler, userReq)
	ExpectStatusCode(c,
		"authenticated non-admin request",
		PermissionError.HTTPCode,
		response)

	// Authenticated admin request for nonexistent block.
	// deletecounter mirrors the JSON body of a DELETE response.
	type deletecounter struct {
		Deleted int `json:"copies_deleted"`
		Failed  int `json:"copies_failed"`
	}
	var responseDc, expectedDc deletecounter

	response = IssueRequest(s.handler, superuserNonexistentBlockReq)
	ExpectStatusCode(c,
		"data manager request, nonexistent block",
		http.StatusNotFound,
		response)

	// Authenticated admin request for existing block while BlobTrash is false.
	s.cluster.Collections.BlobTrash = false
	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"authenticated request, existing block, method disabled",
		MethodDisabledError.HTTPCode,
		response)
	s.cluster.Collections.BlobTrash = true

	// Authenticated admin request for existing block.
	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"data manager request, existing block",
		http.StatusOK,
		response)
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	}
	// Confirm the block has been deleted
	buf := make([]byte, BlockSize)
	_, err := vols[0].Get(context.Background(), TestHash, buf)
	var blockDeleted = os.IsNotExist(err)
	if !blockDeleted {
		c.Error("superuserExistingBlockReq: block not deleted")
	}

	// A DELETE request on a block newer than BlobSigningTTL
	// should return success but leave the block on the volume.
	vols[0].Put(context.Background(), TestHash, TestBlock)
	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)

	response = IssueRequest(s.handler, superuserExistingBlockReq)
	ExpectStatusCode(c,
		"data manager request, existing block",
		http.StatusOK,
		response)
	// Expect response {"copies_deleted":1,"copies_failed":0}
	expectedDc = deletecounter{1, 0}
	json.NewDecoder(response.Body).Decode(&responseDc)
	if responseDc != expectedDc {
		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
			expectedDc, responseDc)
	}
	// Confirm the block has NOT been deleted.
	_, err = vols[0].Get(context.Background(), TestHash, buf)
	if err != nil {
		c.Errorf("testing delete on new block: %s\n", err)
	}
}
729
730 // TestPullHandler
731 //
// Test handling of the PUT /pull request.
733 //
734 // Cases tested: syntactically valid and invalid pull lists, from the
735 // data manager and from unprivileged users:
736 //
737 //   1. Valid pull list from an ordinary user
738 //      (expected result: 401 Unauthorized)
739 //
740 //   2. Invalid pull request from an ordinary user
741 //      (expected result: 401 Unauthorized)
742 //
743 //   3. Valid pull request from the data manager
//      (expected result: 200 OK with request body "Received 3 pull
//      requests")
746 //
747 //   4. Invalid pull request from the data manager
748 //      (expected result: 400 Bad Request)
749 //
750 // Test that in the end, the pull manager received a good pull list with
751 // the expected number of requests.
752 //
753 // TODO(twp): test concurrency: launch 100 goroutines to update the
754 // pull list simultaneously.  Make sure that none of them return 400
755 // Bad Request and that pullq.GetList() returns a valid list.
756 //
757 func (s *HandlerSuite) TestPullHandler(c *check.C) {
758         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
759
760         // Replace the router's pullq -- which the worker goroutines
761         // started by setup() are now receiving from -- with a new
762         // one, so we can see what the handler sends to it.
763         pullq := NewWorkQueue()
764         s.handler.Handler.(*router).pullq = pullq
765
766         var userToken = "USER TOKEN"
767         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
768
769         goodJSON := []byte(`[
770                 {
771                         "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
772                         "servers":[
773                                 "http://server1",
774                                 "http://server2"
775                         ]
776                 },
777                 {
778                         "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
779                         "servers":[]
780                 },
781                 {
782                         "locator":"cccccccccccccccccccccccccccccccc+12345",
783                         "servers":["http://server1"]
784                 }
785         ]`)
786
787         badJSON := []byte(`{ "key":"I'm a little teapot" }`)
788
789         type pullTest struct {
790                 name         string
791                 req          RequestTester
792                 responseCode int
793                 responseBody string
794         }
795         var testcases = []pullTest{
796                 {
797                         "Valid pull list from an ordinary user",
798                         RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
799                         http.StatusUnauthorized,
800                         "Unauthorized\n",
801                 },
802                 {
803                         "Invalid pull request from an ordinary user",
804                         RequestTester{"/pull", userToken, "PUT", badJSON, ""},
805                         http.StatusUnauthorized,
806                         "Unauthorized\n",
807                 },
808                 {
809                         "Valid pull request from the data manager",
810                         RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
811                         http.StatusOK,
812                         "Received 3 pull requests\n",
813                 },
814                 {
815                         "Invalid pull request from the data manager",
816                         RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
817                         http.StatusBadRequest,
818                         "",
819                 },
820         }
821
822         for _, tst := range testcases {
823                 response := IssueRequest(s.handler, &tst.req)
824                 ExpectStatusCode(c, tst.name, tst.responseCode, response)
825                 ExpectBody(c, tst.name, tst.responseBody, response)
826         }
827
828         // The Keep pull manager should have received one good list with 3
829         // requests on it.
830         for i := 0; i < 3; i++ {
831                 var item interface{}
832                 select {
833                 case item = <-pullq.NextItem:
834                 case <-time.After(time.Second):
835                         c.Error("timed out")
836                 }
837                 if _, ok := item.(PullRequest); !ok {
838                         c.Errorf("item %v could not be parsed as a PullRequest", item)
839                 }
840         }
841
842         expectChannelEmpty(c, pullq.NextItem)
843 }
844
845 // TestTrashHandler
846 //
847 // Test cases:
848 //
849 // Cases tested: syntactically valid and invalid trash lists, from the
850 // data manager and from unprivileged users:
851 //
852 //   1. Valid trash list from an ordinary user
853 //      (expected result: 401 Unauthorized)
854 //
855 //   2. Invalid trash list from an ordinary user
856 //      (expected result: 401 Unauthorized)
857 //
858 //   3. Valid trash list from the data manager
//      (expected result: 200 OK with request body "Received 3 trash
//      requests")
861 //
862 //   4. Invalid trash list from the data manager
863 //      (expected result: 400 Bad Request)
864 //
// Test that in the end, the trash collector received a good trash
// list with the expected number of requests.
867 //
// TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously.  Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
871 //
872 func (s *HandlerSuite) TestTrashHandler(c *check.C) {
873         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
874         // Replace the router's trashq -- which the worker goroutines
875         // started by setup() are now receiving from -- with a new
876         // one, so we can see what the handler sends to it.
877         trashq := NewWorkQueue()
878         s.handler.Handler.(*router).trashq = trashq
879
880         var userToken = "USER TOKEN"
881         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
882
883         goodJSON := []byte(`[
884                 {
885                         "locator":"block1",
886                         "block_mtime":1409082153
887                 },
888                 {
889                         "locator":"block2",
890                         "block_mtime":1409082153
891                 },
892                 {
893                         "locator":"block3",
894                         "block_mtime":1409082153
895                 }
896         ]`)
897
898         badJSON := []byte(`I am not a valid JSON string`)
899
900         type trashTest struct {
901                 name         string
902                 req          RequestTester
903                 responseCode int
904                 responseBody string
905         }
906
907         var testcases = []trashTest{
908                 {
909                         "Valid trash list from an ordinary user",
910                         RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
911                         http.StatusUnauthorized,
912                         "Unauthorized\n",
913                 },
914                 {
915                         "Invalid trash list from an ordinary user",
916                         RequestTester{"/trash", userToken, "PUT", badJSON, ""},
917                         http.StatusUnauthorized,
918                         "Unauthorized\n",
919                 },
920                 {
921                         "Valid trash list from the data manager",
922                         RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
923                         http.StatusOK,
924                         "Received 3 trash requests\n",
925                 },
926                 {
927                         "Invalid trash list from the data manager",
928                         RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
929                         http.StatusBadRequest,
930                         "",
931                 },
932         }
933
934         for _, tst := range testcases {
935                 response := IssueRequest(s.handler, &tst.req)
936                 ExpectStatusCode(c, tst.name, tst.responseCode, response)
937                 ExpectBody(c, tst.name, tst.responseBody, response)
938         }
939
940         // The trash collector should have received one good list with 3
941         // requests on it.
942         for i := 0; i < 3; i++ {
943                 item := <-trashq.NextItem
944                 if _, ok := item.(TrashRequest); !ok {
945                         c.Errorf("item %v could not be parsed as a TrashRequest", item)
946                 }
947         }
948
949         expectChannelEmpty(c, trashq.NextItem)
950 }
951
952 // ====================
953 // Helper functions
954 // ====================
955
// IssueRequest executes an HTTP request described by rt against a
// REST router, sending rt.apiToken (if any) with the "OAuth2"
// authorization scheme.  It returns the recorded HTTP response.
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
	response := httptest.NewRecorder()
	body := bytes.NewReader(rt.requestBody)
	req, _ := http.NewRequest(rt.method, rt.uri, body)
	if rt.apiToken != "" {
		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
	}
	if rt.storageClasses != "" {
		req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
	}
	handler.ServeHTTP(response, req)
	return response
}
971
972 func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
973         response := httptest.NewRecorder()
974         body := bytes.NewReader(rt.requestBody)
975         req, _ := http.NewRequest(rt.method, rt.uri, body)
976         if rt.apiToken != "" {
977                 req.Header.Set("Authorization", "Bearer "+rt.apiToken)
978         }
979         handler.ServeHTTP(response, req)
980         return response
981 }
982
983 // ExpectStatusCode checks whether a response has the specified status code,
984 // and reports a test failure if not.
985 func ExpectStatusCode(
986         c *check.C,
987         testname string,
988         expectedStatus int,
989         response *httptest.ResponseRecorder) {
990         c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
991 }
992
993 func ExpectBody(
994         c *check.C,
995         testname string,
996         expectedBody string,
997         response *httptest.ResponseRecorder) {
998         if expectedBody != "" && response.Body.String() != expectedBody {
999                 c.Errorf("%s: expected response body '%s', got %+v",
1000                         testname, expectedBody, response)
1001         }
1002 }
1003
1004 // See #7121
1005 func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
1006         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1007
1008         defer func(orig *bufferPool) {
1009                 bufs = orig
1010         }(bufs)
1011         bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
1012
1013         ok := make(chan struct{})
1014         go func() {
1015                 for i := 0; i < 2; i++ {
1016                         response := IssueRequest(s.handler,
1017                                 &RequestTester{
1018                                         method:      "PUT",
1019                                         uri:         "/" + TestHash,
1020                                         requestBody: TestBlock,
1021                                 })
1022                         ExpectStatusCode(c,
1023                                 "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
1024                 }
1025                 ok <- struct{}{}
1026         }()
1027
1028         select {
1029         case <-ok:
1030         case <-time.After(time.Second):
1031                 c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
1032         }
1033 }
1034
1035 // Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
1036 // leak.
1037 func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
1038         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1039
1040         ok := make(chan bool)
1041         go func() {
1042                 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
1043                         // Unauthenticated request, no server key
1044                         // => OK (unsigned response)
1045                         unsignedLocator := "/" + TestHash
1046                         response := IssueRequest(s.handler,
1047                                 &RequestTester{
1048                                         method:      "PUT",
1049                                         uri:         unsignedLocator,
1050                                         requestBody: TestBlock,
1051                                 })
1052                         ExpectStatusCode(c,
1053                                 "TestPutHandlerBufferleak", http.StatusOK, response)
1054                         ExpectBody(c,
1055                                 "TestPutHandlerBufferleak",
1056                                 TestHashPutResp, response)
1057                 }
1058                 ok <- true
1059         }()
1060         select {
1061         case <-time.After(20 * time.Second):
1062                 // If the buffer pool leaks, the test goroutine hangs.
1063                 c.Fatal("test did not finish, assuming pool leaked")
1064         case <-ok:
1065         }
1066 }
1067
// notifyingResponseRecorder wraps httptest.ResponseRecorder and
// additionally implements http.CloseNotifier, so a test can simulate
// a client disconnecting mid-request by sending on closer.
type notifyingResponseRecorder struct {
	*httptest.ResponseRecorder
	closer chan bool
}

// CloseNotify returns the channel a test uses to signal that the
// (simulated) client has gone away.
func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
	return r.closer
}
1076
// TestGetHandlerClientDisconnect verifies that a GET request whose
// client has already disconnected is abandoned (503) without reading
// from any volume.
func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
	s.cluster.Collections.BlobSigning = false
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Shrink the buffer pool to one buffer and immediately check
	// that buffer out (returning it only when the test ends, via
	// the deferred Put), so the GET handler must wait for a buffer
	// and has the opportunity to notice the disconnect instead.
	defer func(orig *bufferPool) {
		bufs = orig
	}(bufs)
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
	defer bufs.Put(bufs.Get(BlockSize))

	if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
		c.Error(err)
	}

	resp := &notifyingResponseRecorder{
		ResponseRecorder: httptest.NewRecorder(),
		closer:           make(chan bool, 1),
	}
	if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
		c.Fatal("notifyingResponseRecorder is broken")
	}
	// If anyone asks, the client has disconnected.
	resp.closer <- true

	ok := make(chan struct{})
	go func() {
		req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
		s.handler.ServeHTTP(resp, req)
		ok <- struct{}{}
	}()

	select {
	case <-time.After(20 * time.Second):
		c.Fatal("request took >20s, close notifier must be broken")
	case <-ok:
	}

	// The handler should have given up with 503 and never issued a
	// GET to any volume.
	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
	for i, v := range s.handler.volmgr.AllWritable() {
		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
			c.Errorf("volume %d got %d calls, expected 0", i, calls)
		}
	}
}
1121
1122 // Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
1123 // leak.
1124 func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
1125         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1126
1127         vols := s.handler.volmgr.AllWritable()
1128         if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
1129                 c.Error(err)
1130         }
1131
1132         ok := make(chan bool)
1133         go func() {
1134                 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
1135                         // Unauthenticated request, unsigned locator
1136                         // => OK
1137                         unsignedLocator := "/" + TestHash
1138                         response := IssueRequest(s.handler,
1139                                 &RequestTester{
1140                                         method: "GET",
1141                                         uri:    unsignedLocator,
1142                                 })
1143                         ExpectStatusCode(c,
1144                                 "Unauthenticated request, unsigned locator", http.StatusOK, response)
1145                         ExpectBody(c,
1146                                 "Unauthenticated request, unsigned locator",
1147                                 string(TestBlock),
1148                                 response)
1149                 }
1150                 ok <- true
1151         }()
1152         select {
1153         case <-time.After(20 * time.Second):
1154                 // If the buffer pool leaks, the test goroutine hangs.
1155                 c.Fatal("test did not finish, assuming pool leaked")
1156         case <-ok:
1157         }
1158 }
1159
// TestPutStorageClasses verifies the X-Keep-Storage-Classes request
// header: which classes get confirmed in the response header, and
// which requests fail with 503 because no writable volume can
// satisfy them.
func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
	// Three volumes: implicit "default" class only; two explicit
	// classes ("special" and "extra"); and a read-only volume with
	// class "readonly".
	s.cluster.Volumes = map[string]arvados.Volume{
		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
	}
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
	rt := RequestTester{
		method:      "PUT",
		uri:         "/" + TestHash,
		requestBody: TestBlock,
	}

	// Success cases: each requested class list should produce the
	// expected X-Keep-Storage-Classes-Confirmed header (compared
	// after sorting, since header order is not significant here).
	for _, trial := range []struct {
		ask    string
		expect string
	}{
		{"", ""},
		{"default", "default=1"},
		{" , default , default , ", "default=1"},
		{"special", "extra=1, special=1"},
		{"special, readonly", "extra=1, special=1"},
		{"special, nonexistent", "extra=1, special=1"},
		{"extra, special", "extra=1, special=1"},
		{"default, special", "default=1, extra=1, special=1"},
	} {
		c.Logf("success case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		if trial.expect == "" {
			// any non-empty value is correct
			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
		} else {
			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
		}
	}

	// Failure cases: classes satisfiable only by the read-only
	// volume, or by no volume at all, should return 503.
	for _, trial := range []struct {
		ask string
	}{
		{"doesnotexist"},
		{"doesnotexist, readonly"},
		{"readonly"},
	} {
		c.Logf("failure case %#v", trial)
		rt.storageClasses = trial.ask
		resp := IssueRequest(s.handler, &rt)
		c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
	}
}
1210
// sortCommaSeparated returns s with its ", "-separated fields
// rearranged into lexicographic order.
func sortCommaSeparated(s string) string {
	parts := strings.Split(s, ", ")
	sort.Sort(sort.StringSlice(parts))
	return strings.Join(parts, ", ")
}
1216
1217 func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
1218         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1219
1220         resp := IssueRequest(s.handler, &RequestTester{
1221                 method:      "PUT",
1222                 uri:         "/" + TestHash,
1223                 requestBody: TestBlock,
1224         })
1225         c.Logf("%#v", resp)
1226         c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
1227         c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
1228 }
1229
// TestUntrashHandler verifies authorization and request validation
// for PUT /untrash/{hash}: unauthenticated and non-superuser tokens
// are rejected, bad locators and wrong methods are rejected, and a
// well-formed superuser request succeeds on all volumes.
func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Set up Keep volumes
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	// unauthenticatedReq => UnauthorizedError
	unauthenticatedReq := &RequestTester{
		method: "PUT",
		uri:    "/untrash/" + TestHash,
	}
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"Unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// notDataManagerReq => UnauthorizedError
	notDataManagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: knownToken,
	}

	response = IssueRequest(s.handler, notDataManagerReq)
	ExpectStatusCode(c,
		"Non-datamanager token",
		UnauthorizedError.HTTPCode,
		response)

	// datamanagerWithBadHashReq => StatusBadRequest
	datamanagerWithBadHashReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/thisisnotalocator",
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
	ExpectStatusCode(c,
		"Bad locator in untrash request",
		http.StatusBadRequest,
		response)

	// datamanagerWrongMethodReq => StatusMethodNotAllowed
	datamanagerWrongMethodReq := &RequestTester{
		method:   "GET",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
	ExpectStatusCode(c,
		"Only PUT method is supported for untrash",
		http.StatusMethodNotAllowed,
		response)

	// datamanagerReq => StatusOK
	datamanagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerReq)
	ExpectStatusCode(c,
		"",
		http.StatusOK,
		response)
	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
}
1300
1301 func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
1302         // Change all volumes to read-only
1303         for uuid, v := range s.cluster.Volumes {
1304                 v.ReadOnly = true
1305                 s.cluster.Volumes[uuid] = v
1306         }
1307         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1308
1309         // datamanagerReq => StatusOK
1310         datamanagerReq := &RequestTester{
1311                 method:   "PUT",
1312                 uri:      "/untrash/" + TestHash,
1313                 apiToken: s.cluster.SystemRootToken,
1314         }
1315         response := IssueRequest(s.handler, datamanagerReq)
1316         ExpectStatusCode(c,
1317                 "No writable volumes",
1318                 http.StatusNotFound,
1319                 response)
1320 }
1321
1322 func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
1323         s.cluster.ManagementToken = arvadostest.ManagementToken
1324         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1325         pingReq := &RequestTester{
1326                 method:   "GET",
1327                 uri:      "/_health/ping",
1328                 apiToken: arvadostest.ManagementToken,
1329         }
1330         response := IssueHealthCheckRequest(s.handler, pingReq)
1331         ExpectStatusCode(c,
1332                 "",
1333                 http.StatusOK,
1334                 response)
1335         want := `{"health":"OK"}`
1336         if !strings.Contains(response.Body.String(), want) {
1337                 c.Errorf("expected response to include %s: got %s", want, response.Body.String())
1338         }
1339 }