17967: Read from volumes with high-priority storage classes first.
[arvados.git] / services / keepstore / handler_test.go
1 // Copyright (C) The Arvados Authors. All rights reserved.
2 //
3 // SPDX-License-Identifier: AGPL-3.0
4
5 // Tests for Keep HTTP handlers:
6 //
7 //     GetBlockHandler
8 //     PutBlockHandler
9 //     IndexHandler
10 //
11 // The HTTP handlers are responsible for enforcing permission policy,
12 // so these tests must exercise all possible permission permutations.
13
14 package main
15
16 import (
17         "bytes"
18         "context"
19         "encoding/json"
20         "fmt"
21         "net/http"
22         "net/http/httptest"
23         "os"
24         "regexp"
25         "sort"
26         "strings"
27         "time"
28
29         "git.arvados.org/arvados.git/lib/config"
30         "git.arvados.org/arvados.git/sdk/go/arvados"
31         "git.arvados.org/arvados.git/sdk/go/arvadostest"
32         "git.arvados.org/arvados.git/sdk/go/ctxlog"
33         "github.com/prometheus/client_golang/prometheus"
34         check "gopkg.in/check.v1"
35 )
36
37 var testServiceURL = func() arvados.URL {
38         return arvados.URL{Host: "localhost:12345", Scheme: "http"}
39 }()
40
41 func testCluster(t TB) *arvados.Cluster {
42         cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
43         if err != nil {
44                 t.Fatal(err)
45         }
46         cluster, err := cfg.GetCluster("")
47         if err != nil {
48                 t.Fatal(err)
49         }
50         cluster.SystemRootToken = arvadostest.SystemRootToken
51         cluster.ManagementToken = arvadostest.ManagementToken
52         cluster.Collections.BlobSigning = false
53         return cluster
54 }
55
// Register HandlerSuite with gocheck.
var _ = check.Suite(&HandlerSuite{})

// HandlerSuite holds the per-test fixtures for keepstore HTTP
// handler tests; both fields are rebuilt by SetUpTest.
type HandlerSuite struct {
	cluster *arvados.Cluster // cluster configuration under test
	handler *handler         // handler under test (initialized via setup() in each test)
}
62
63 func (s *HandlerSuite) SetUpTest(c *check.C) {
64         s.cluster = testCluster(c)
65         s.cluster.Volumes = map[string]arvados.Volume{
66                 "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
67                 "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
68         }
69         s.handler = &handler{}
70 }
71
// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
type RequestTester struct {
	uri            string // request path, e.g. "/"+hash or "/index"
	apiToken       string // API token to authenticate the request, if any
	method         string // HTTP method: GET, PUT, DELETE, TOUCH, ...
	requestBody    []byte // request body (used by PUT requests)
	storageClasses string // requested storage classes (see TestReadsOrderedByStorageClassPriority)
}
81
82 // Test GetBlockHandler on the following situations:
83 //   - permissions off, unauthenticated request, unsigned locator
84 //   - permissions on, authenticated request, signed locator
85 //   - permissions on, authenticated request, unsigned locator
86 //   - permissions on, unauthenticated request, signed locator
87 //   - permissions on, authenticated request, expired locator
88 //   - permissions on, authenticated request, signed locator, transient error from backend
89 //
90 func (s *HandlerSuite) TestGetHandler(c *check.C) {
91         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
92
93         vols := s.handler.volmgr.AllWritable()
94         err := vols[0].Put(context.Background(), TestHash, TestBlock)
95         c.Check(err, check.IsNil)
96
97         // Create locators for testing.
98         // Turn on permission settings so we can generate signed locators.
99         s.cluster.Collections.BlobSigning = true
100         s.cluster.Collections.BlobSigningKey = knownKey
101         s.cluster.Collections.BlobSigningTTL.Set("5m")
102
103         var (
104                 unsignedLocator  = "/" + TestHash
105                 validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
106                 expiredTimestamp = time.Now().Add(-time.Hour)
107                 signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
108                 expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
109         )
110
111         // -----------------
112         // Test unauthenticated request with permissions off.
113         s.cluster.Collections.BlobSigning = false
114
115         // Unauthenticated request, unsigned locator
116         // => OK
117         response := IssueRequest(s.handler,
118                 &RequestTester{
119                         method: "GET",
120                         uri:    unsignedLocator,
121                 })
122         ExpectStatusCode(c,
123                 "Unauthenticated request, unsigned locator", http.StatusOK, response)
124         ExpectBody(c,
125                 "Unauthenticated request, unsigned locator",
126                 string(TestBlock),
127                 response)
128
129         receivedLen := response.Header().Get("Content-Length")
130         expectedLen := fmt.Sprintf("%d", len(TestBlock))
131         if receivedLen != expectedLen {
132                 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
133         }
134
135         // ----------------
136         // Permissions: on.
137         s.cluster.Collections.BlobSigning = true
138
139         // Authenticated request, signed locator
140         // => OK
141         response = IssueRequest(s.handler, &RequestTester{
142                 method:   "GET",
143                 uri:      signedLocator,
144                 apiToken: knownToken,
145         })
146         ExpectStatusCode(c,
147                 "Authenticated request, signed locator", http.StatusOK, response)
148         ExpectBody(c,
149                 "Authenticated request, signed locator", string(TestBlock), response)
150
151         receivedLen = response.Header().Get("Content-Length")
152         expectedLen = fmt.Sprintf("%d", len(TestBlock))
153         if receivedLen != expectedLen {
154                 c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
155         }
156
157         // Authenticated request, unsigned locator
158         // => PermissionError
159         response = IssueRequest(s.handler, &RequestTester{
160                 method:   "GET",
161                 uri:      unsignedLocator,
162                 apiToken: knownToken,
163         })
164         ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
165
166         // Unauthenticated request, signed locator
167         // => PermissionError
168         response = IssueRequest(s.handler, &RequestTester{
169                 method: "GET",
170                 uri:    signedLocator,
171         })
172         ExpectStatusCode(c,
173                 "Unauthenticated request, signed locator",
174                 PermissionError.HTTPCode, response)
175
176         // Authenticated request, expired locator
177         // => ExpiredError
178         response = IssueRequest(s.handler, &RequestTester{
179                 method:   "GET",
180                 uri:      expiredLocator,
181                 apiToken: knownToken,
182         })
183         ExpectStatusCode(c,
184                 "Authenticated request, expired locator",
185                 ExpiredError.HTTPCode, response)
186
187         // Authenticated request, signed locator
188         // => 503 Server busy (transient error)
189
190         // Set up the block owning volume to respond with errors
191         vols[0].Volume.(*MockVolume).Bad = true
192         vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
193         response = IssueRequest(s.handler, &RequestTester{
194                 method:   "GET",
195                 uri:      signedLocator,
196                 apiToken: knownToken,
197         })
198         // A transient error from one volume while the other doesn't find the block
199         // should make the service return a 503 so that clients can retry.
200         ExpectStatusCode(c,
201                 "Volume backend busy",
202                 503, response)
203 }
204
205 // Test PutBlockHandler on the following situations:
206 //   - no server key
207 //   - with server key, authenticated request, unsigned locator
208 //   - with server key, unauthenticated request, unsigned locator
209 //
210 func (s *HandlerSuite) TestPutHandler(c *check.C) {
211         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
212
213         // --------------
214         // No server key.
215
216         s.cluster.Collections.BlobSigningKey = ""
217
218         // Unauthenticated request, no server key
219         // => OK (unsigned response)
220         unsignedLocator := "/" + TestHash
221         response := IssueRequest(s.handler,
222                 &RequestTester{
223                         method:      "PUT",
224                         uri:         unsignedLocator,
225                         requestBody: TestBlock,
226                 })
227
228         ExpectStatusCode(c,
229                 "Unauthenticated request, no server key", http.StatusOK, response)
230         ExpectBody(c,
231                 "Unauthenticated request, no server key",
232                 TestHashPutResp, response)
233
234         // ------------------
235         // With a server key.
236
237         s.cluster.Collections.BlobSigningKey = knownKey
238         s.cluster.Collections.BlobSigningTTL.Set("5m")
239
240         // When a permission key is available, the locator returned
241         // from an authenticated PUT request will be signed.
242
243         // Authenticated PUT, signed locator
244         // => OK (signed response)
245         response = IssueRequest(s.handler,
246                 &RequestTester{
247                         method:      "PUT",
248                         uri:         unsignedLocator,
249                         requestBody: TestBlock,
250                         apiToken:    knownToken,
251                 })
252
253         ExpectStatusCode(c,
254                 "Authenticated PUT, signed locator, with server key",
255                 http.StatusOK, response)
256         responseLocator := strings.TrimSpace(response.Body.String())
257         if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
258                 c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
259                         "response '%s' does not contain a valid signature",
260                         responseLocator)
261         }
262
263         // Unauthenticated PUT, unsigned locator
264         // => OK
265         response = IssueRequest(s.handler,
266                 &RequestTester{
267                         method:      "PUT",
268                         uri:         unsignedLocator,
269                         requestBody: TestBlock,
270                 })
271
272         ExpectStatusCode(c,
273                 "Unauthenticated PUT, unsigned locator, with server key",
274                 http.StatusOK, response)
275         ExpectBody(c,
276                 "Unauthenticated PUT, unsigned locator, with server key",
277                 TestHashPutResp, response)
278 }
279
280 func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
281         s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
282         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
283
284         s.cluster.SystemRootToken = "fake-data-manager-token"
285         IssueRequest(s.handler,
286                 &RequestTester{
287                         method:      "PUT",
288                         uri:         "/" + TestHash,
289                         requestBody: TestBlock,
290                 })
291
292         s.cluster.Collections.BlobTrash = true
293         IssueRequest(s.handler,
294                 &RequestTester{
295                         method:      "DELETE",
296                         uri:         "/" + TestHash,
297                         requestBody: TestBlock,
298                         apiToken:    s.cluster.SystemRootToken,
299                 })
300         type expect struct {
301                 volid     string
302                 method    string
303                 callcount int
304         }
305         for _, e := range []expect{
306                 {"zzzzz-nyw5e-000000000000000", "Get", 0},
307                 {"zzzzz-nyw5e-000000000000000", "Compare", 0},
308                 {"zzzzz-nyw5e-000000000000000", "Touch", 0},
309                 {"zzzzz-nyw5e-000000000000000", "Put", 0},
310                 {"zzzzz-nyw5e-000000000000000", "Delete", 0},
311                 {"zzzzz-nyw5e-111111111111111", "Get", 0},
312                 {"zzzzz-nyw5e-111111111111111", "Compare", 1},
313                 {"zzzzz-nyw5e-111111111111111", "Touch", 1},
314                 {"zzzzz-nyw5e-111111111111111", "Put", 1},
315                 {"zzzzz-nyw5e-111111111111111", "Delete", 1},
316         } {
317                 if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
318                         c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
319                 }
320         }
321 }
322
323 func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
324         s.cluster.Volumes = map[string]arvados.Volume{
325                 "zzzzz-nyw5e-111111111111111": {
326                         Driver:         "mock",
327                         Replication:    1,
328                         StorageClasses: map[string]bool{"class1": true}},
329                 "zzzzz-nyw5e-222222222222222": {
330                         Driver:         "mock",
331                         Replication:    1,
332                         StorageClasses: map[string]bool{"class2": true, "class3": true}},
333         }
334
335         for _, trial := range []struct {
336                 priority1 int // priority of class1, thus vol1
337                 priority2 int // priority of class2
338                 priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
339                 get1      int // expected number of "get" ops on vol1
340                 get2      int // expected number of "get" ops on vol2
341         }{
342                 {100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
343                 {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
344                 {66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
345                 {66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
346         } {
347                 c.Logf("%+v", trial)
348                 s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
349                         "class1": {Priority: trial.priority1},
350                         "class2": {Priority: trial.priority2},
351                         "class3": {Priority: trial.priority3},
352                 }
353                 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
354                 IssueRequest(s.handler,
355                         &RequestTester{
356                                 method:         "PUT",
357                                 uri:            "/" + TestHash,
358                                 requestBody:    TestBlock,
359                                 storageClasses: "class1",
360                         })
361                 IssueRequest(s.handler,
362                         &RequestTester{
363                                 method: "GET",
364                                 uri:    "/" + TestHash,
365                         })
366                 c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
367                 c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
368         }
369 }
370
371 // Test TOUCH requests.
372 func (s *HandlerSuite) TestTouchHandler(c *check.C) {
373         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
374         vols := s.handler.volmgr.AllWritable()
375         vols[0].Put(context.Background(), TestHash, TestBlock)
376         vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
377         afterPut := time.Now()
378         t, err := vols[0].Mtime(TestHash)
379         c.Assert(err, check.IsNil)
380         c.Assert(t.Before(afterPut), check.Equals, true)
381
382         ExpectStatusCode(c,
383                 "touch with no credentials",
384                 http.StatusUnauthorized,
385                 IssueRequest(s.handler, &RequestTester{
386                         method: "TOUCH",
387                         uri:    "/" + TestHash,
388                 }))
389
390         ExpectStatusCode(c,
391                 "touch with non-root credentials",
392                 http.StatusUnauthorized,
393                 IssueRequest(s.handler, &RequestTester{
394                         method:   "TOUCH",
395                         uri:      "/" + TestHash,
396                         apiToken: arvadostest.ActiveTokenV2,
397                 }))
398
399         ExpectStatusCode(c,
400                 "touch non-existent block",
401                 http.StatusNotFound,
402                 IssueRequest(s.handler, &RequestTester{
403                         method:   "TOUCH",
404                         uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
405                         apiToken: s.cluster.SystemRootToken,
406                 }))
407
408         beforeTouch := time.Now()
409         ExpectStatusCode(c,
410                 "touch block",
411                 http.StatusOK,
412                 IssueRequest(s.handler, &RequestTester{
413                         method:   "TOUCH",
414                         uri:      "/" + TestHash,
415                         apiToken: s.cluster.SystemRootToken,
416                 }))
417         t, err = vols[0].Mtime(TestHash)
418         c.Assert(err, check.IsNil)
419         c.Assert(t.After(beforeTouch), check.Equals, true)
420 }
421
422 // Test /index requests:
423 //   - unauthenticated /index request
424 //   - unauthenticated /index/prefix request
425 //   - authenticated   /index request        | non-superuser
426 //   - authenticated   /index/prefix request | non-superuser
427 //   - authenticated   /index request        | superuser
428 //   - authenticated   /index/prefix request | superuser
429 //
430 // The only /index requests that should succeed are those issued by the
431 // superuser. They should pass regardless of the value of BlobSigning.
432 //
433 func (s *HandlerSuite) TestIndexHandler(c *check.C) {
434         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
435
436         // Include multiple blocks on different volumes, and
437         // some metadata files (which should be omitted from index listings)
438         vols := s.handler.volmgr.AllWritable()
439         vols[0].Put(context.Background(), TestHash, TestBlock)
440         vols[1].Put(context.Background(), TestHash2, TestBlock2)
441         vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
442         vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
443
444         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
445
446         unauthenticatedReq := &RequestTester{
447                 method: "GET",
448                 uri:    "/index",
449         }
450         authenticatedReq := &RequestTester{
451                 method:   "GET",
452                 uri:      "/index",
453                 apiToken: knownToken,
454         }
455         superuserReq := &RequestTester{
456                 method:   "GET",
457                 uri:      "/index",
458                 apiToken: s.cluster.SystemRootToken,
459         }
460         unauthPrefixReq := &RequestTester{
461                 method: "GET",
462                 uri:    "/index/" + TestHash[0:3],
463         }
464         authPrefixReq := &RequestTester{
465                 method:   "GET",
466                 uri:      "/index/" + TestHash[0:3],
467                 apiToken: knownToken,
468         }
469         superuserPrefixReq := &RequestTester{
470                 method:   "GET",
471                 uri:      "/index/" + TestHash[0:3],
472                 apiToken: s.cluster.SystemRootToken,
473         }
474         superuserNoSuchPrefixReq := &RequestTester{
475                 method:   "GET",
476                 uri:      "/index/abcd",
477                 apiToken: s.cluster.SystemRootToken,
478         }
479         superuserInvalidPrefixReq := &RequestTester{
480                 method:   "GET",
481                 uri:      "/index/xyz",
482                 apiToken: s.cluster.SystemRootToken,
483         }
484
485         // -------------------------------------------------------------
486         // Only the superuser should be allowed to issue /index requests.
487
488         // ---------------------------
489         // BlobSigning enabled
490         // This setting should not affect tests passing.
491         s.cluster.Collections.BlobSigning = true
492
493         // unauthenticated /index request
494         // => UnauthorizedError
495         response := IssueRequest(s.handler, unauthenticatedReq)
496         ExpectStatusCode(c,
497                 "permissions on, unauthenticated request",
498                 UnauthorizedError.HTTPCode,
499                 response)
500
501         // unauthenticated /index/prefix request
502         // => UnauthorizedError
503         response = IssueRequest(s.handler, unauthPrefixReq)
504         ExpectStatusCode(c,
505                 "permissions on, unauthenticated /index/prefix request",
506                 UnauthorizedError.HTTPCode,
507                 response)
508
509         // authenticated /index request, non-superuser
510         // => UnauthorizedError
511         response = IssueRequest(s.handler, authenticatedReq)
512         ExpectStatusCode(c,
513                 "permissions on, authenticated request, non-superuser",
514                 UnauthorizedError.HTTPCode,
515                 response)
516
517         // authenticated /index/prefix request, non-superuser
518         // => UnauthorizedError
519         response = IssueRequest(s.handler, authPrefixReq)
520         ExpectStatusCode(c,
521                 "permissions on, authenticated /index/prefix request, non-superuser",
522                 UnauthorizedError.HTTPCode,
523                 response)
524
525         // superuser /index request
526         // => OK
527         response = IssueRequest(s.handler, superuserReq)
528         ExpectStatusCode(c,
529                 "permissions on, superuser request",
530                 http.StatusOK,
531                 response)
532
533         // ----------------------------
534         // BlobSigning disabled
535         // Valid Request should still pass.
536         s.cluster.Collections.BlobSigning = false
537
538         // superuser /index request
539         // => OK
540         response = IssueRequest(s.handler, superuserReq)
541         ExpectStatusCode(c,
542                 "permissions on, superuser request",
543                 http.StatusOK,
544                 response)
545
546         expected := `^` + TestHash + `\+\d+ \d+\n` +
547                 TestHash2 + `\+\d+ \d+\n\n$`
548         match, _ := regexp.MatchString(expected, response.Body.String())
549         if !match {
550                 c.Errorf(
551                         "permissions on, superuser request: expected %s, got:\n%s",
552                         expected, response.Body.String())
553         }
554
555         // superuser /index/prefix request
556         // => OK
557         response = IssueRequest(s.handler, superuserPrefixReq)
558         ExpectStatusCode(c,
559                 "permissions on, superuser request",
560                 http.StatusOK,
561                 response)
562
563         expected = `^` + TestHash + `\+\d+ \d+\n\n$`
564         match, _ = regexp.MatchString(expected, response.Body.String())
565         if !match {
566                 c.Errorf(
567                         "permissions on, superuser /index/prefix request: expected %s, got:\n%s",
568                         expected, response.Body.String())
569         }
570
571         // superuser /index/{no-such-prefix} request
572         // => OK
573         response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
574         ExpectStatusCode(c,
575                 "permissions on, superuser request",
576                 http.StatusOK,
577                 response)
578
579         if "\n" != response.Body.String() {
580                 c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
581         }
582
583         // superuser /index/{invalid-prefix} request
584         // => StatusBadRequest
585         response = IssueRequest(s.handler, superuserInvalidPrefixReq)
586         ExpectStatusCode(c,
587                 "permissions on, superuser request",
588                 http.StatusBadRequest,
589                 response)
590 }
591
592 // TestDeleteHandler
593 //
594 // Cases tested:
595 //
596 //   With no token and with a non-data-manager token:
597 //   * Delete existing block
598 //     (test for 403 Forbidden, confirm block not deleted)
599 //
600 //   With data manager token:
601 //
602 //   * Delete existing block
603 //     (test for 200 OK, response counts, confirm block deleted)
604 //
605 //   * Delete nonexistent block
606 //     (test for 200 OK, response counts)
607 //
608 //   TODO(twp):
609 //
610 //   * Delete block on read-only and read-write volume
611 //     (test for 200 OK, response with copies_deleted=1,
612 //     copies_failed=1, confirm block deleted only on r/w volume)
613 //
614 //   * Delete block on read-only volume only
615 //     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
616 //     confirm block not deleted)
617 //
618 func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
619         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
620
621         vols := s.handler.volmgr.AllWritable()
622         vols[0].Put(context.Background(), TestHash, TestBlock)
623
624         // Explicitly set the BlobSigningTTL to 0 for these
625         // tests, to ensure the MockVolume deletes the blocks
626         // even though they have just been created.
627         s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
628
629         var userToken = "NOT DATA MANAGER TOKEN"
630         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
631
632         s.cluster.Collections.BlobTrash = true
633
634         unauthReq := &RequestTester{
635                 method: "DELETE",
636                 uri:    "/" + TestHash,
637         }
638
639         userReq := &RequestTester{
640                 method:   "DELETE",
641                 uri:      "/" + TestHash,
642                 apiToken: userToken,
643         }
644
645         superuserExistingBlockReq := &RequestTester{
646                 method:   "DELETE",
647                 uri:      "/" + TestHash,
648                 apiToken: s.cluster.SystemRootToken,
649         }
650
651         superuserNonexistentBlockReq := &RequestTester{
652                 method:   "DELETE",
653                 uri:      "/" + TestHash2,
654                 apiToken: s.cluster.SystemRootToken,
655         }
656
657         // Unauthenticated request returns PermissionError.
658         var response *httptest.ResponseRecorder
659         response = IssueRequest(s.handler, unauthReq)
660         ExpectStatusCode(c,
661                 "unauthenticated request",
662                 PermissionError.HTTPCode,
663                 response)
664
665         // Authenticated non-admin request returns PermissionError.
666         response = IssueRequest(s.handler, userReq)
667         ExpectStatusCode(c,
668                 "authenticated non-admin request",
669                 PermissionError.HTTPCode,
670                 response)
671
672         // Authenticated admin request for nonexistent block.
673         type deletecounter struct {
674                 Deleted int `json:"copies_deleted"`
675                 Failed  int `json:"copies_failed"`
676         }
677         var responseDc, expectedDc deletecounter
678
679         response = IssueRequest(s.handler, superuserNonexistentBlockReq)
680         ExpectStatusCode(c,
681                 "data manager request, nonexistent block",
682                 http.StatusNotFound,
683                 response)
684
685         // Authenticated admin request for existing block while BlobTrash is false.
686         s.cluster.Collections.BlobTrash = false
687         response = IssueRequest(s.handler, superuserExistingBlockReq)
688         ExpectStatusCode(c,
689                 "authenticated request, existing block, method disabled",
690                 MethodDisabledError.HTTPCode,
691                 response)
692         s.cluster.Collections.BlobTrash = true
693
694         // Authenticated admin request for existing block.
695         response = IssueRequest(s.handler, superuserExistingBlockReq)
696         ExpectStatusCode(c,
697                 "data manager request, existing block",
698                 http.StatusOK,
699                 response)
700         // Expect response {"copies_deleted":1,"copies_failed":0}
701         expectedDc = deletecounter{1, 0}
702         json.NewDecoder(response.Body).Decode(&responseDc)
703         if responseDc != expectedDc {
704                 c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
705                         expectedDc, responseDc)
706         }
707         // Confirm the block has been deleted
708         buf := make([]byte, BlockSize)
709         _, err := vols[0].Get(context.Background(), TestHash, buf)
710         var blockDeleted = os.IsNotExist(err)
711         if !blockDeleted {
712                 c.Error("superuserExistingBlockReq: block not deleted")
713         }
714
715         // A DELETE request on a block newer than BlobSigningTTL
716         // should return success but leave the block on the volume.
717         vols[0].Put(context.Background(), TestHash, TestBlock)
718         s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
719
720         response = IssueRequest(s.handler, superuserExistingBlockReq)
721         ExpectStatusCode(c,
722                 "data manager request, existing block",
723                 http.StatusOK,
724                 response)
725         // Expect response {"copies_deleted":1,"copies_failed":0}
726         expectedDc = deletecounter{1, 0}
727         json.NewDecoder(response.Body).Decode(&responseDc)
728         if responseDc != expectedDc {
729                 c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
730                         expectedDc, responseDc)
731         }
732         // Confirm the block has NOT been deleted.
733         _, err = vols[0].Get(context.Background(), TestHash, buf)
734         if err != nil {
735                 c.Errorf("testing delete on new block: %s\n", err)
736         }
737 }
738
739 // TestPullHandler
740 //
741 // Test handling of the PUT /pull statement.
742 //
743 // Cases tested: syntactically valid and invalid pull lists, from the
744 // data manager and from unprivileged users:
745 //
746 //   1. Valid pull list from an ordinary user
747 //      (expected result: 401 Unauthorized)
748 //
749 //   2. Invalid pull request from an ordinary user
750 //      (expected result: 401 Unauthorized)
751 //
752 //   3. Valid pull request from the data manager
753 //      (expected result: 200 OK with request body "Received 3 pull
754 //      requests")
755 //
756 //   4. Invalid pull request from the data manager
757 //      (expected result: 400 Bad Request)
758 //
759 // Test that in the end, the pull manager received a good pull list with
760 // the expected number of requests.
761 //
762 // TODO(twp): test concurrency: launch 100 goroutines to update the
763 // pull list simultaneously.  Make sure that none of them return 400
764 // Bad Request and that pullq.GetList() returns a valid list.
765 //
766 func (s *HandlerSuite) TestPullHandler(c *check.C) {
767         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
768
769         // Replace the router's pullq -- which the worker goroutines
770         // started by setup() are now receiving from -- with a new
771         // one, so we can see what the handler sends to it.
772         pullq := NewWorkQueue()
773         s.handler.Handler.(*router).pullq = pullq
774
775         var userToken = "USER TOKEN"
776         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
777
778         goodJSON := []byte(`[
779                 {
780                         "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
781                         "servers":[
782                                 "http://server1",
783                                 "http://server2"
784                         ]
785                 },
786                 {
787                         "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
788                         "servers":[]
789                 },
790                 {
791                         "locator":"cccccccccccccccccccccccccccccccc+12345",
792                         "servers":["http://server1"]
793                 }
794         ]`)
795
796         badJSON := []byte(`{ "key":"I'm a little teapot" }`)
797
798         type pullTest struct {
799                 name         string
800                 req          RequestTester
801                 responseCode int
802                 responseBody string
803         }
804         var testcases = []pullTest{
805                 {
806                         "Valid pull list from an ordinary user",
807                         RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
808                         http.StatusUnauthorized,
809                         "Unauthorized\n",
810                 },
811                 {
812                         "Invalid pull request from an ordinary user",
813                         RequestTester{"/pull", userToken, "PUT", badJSON, ""},
814                         http.StatusUnauthorized,
815                         "Unauthorized\n",
816                 },
817                 {
818                         "Valid pull request from the data manager",
819                         RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
820                         http.StatusOK,
821                         "Received 3 pull requests\n",
822                 },
823                 {
824                         "Invalid pull request from the data manager",
825                         RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
826                         http.StatusBadRequest,
827                         "",
828                 },
829         }
830
831         for _, tst := range testcases {
832                 response := IssueRequest(s.handler, &tst.req)
833                 ExpectStatusCode(c, tst.name, tst.responseCode, response)
834                 ExpectBody(c, tst.name, tst.responseBody, response)
835         }
836
837         // The Keep pull manager should have received one good list with 3
838         // requests on it.
839         for i := 0; i < 3; i++ {
840                 var item interface{}
841                 select {
842                 case item = <-pullq.NextItem:
843                 case <-time.After(time.Second):
844                         c.Error("timed out")
845                 }
846                 if _, ok := item.(PullRequest); !ok {
847                         c.Errorf("item %v could not be parsed as a PullRequest", item)
848                 }
849         }
850
851         expectChannelEmpty(c, pullq.NextItem)
852 }
853
854 // TestTrashHandler
855 //
856 // Test cases:
857 //
858 // Cases tested: syntactically valid and invalid trash lists, from the
859 // data manager and from unprivileged users:
860 //
861 //   1. Valid trash list from an ordinary user
862 //      (expected result: 401 Unauthorized)
863 //
864 //   2. Invalid trash list from an ordinary user
865 //      (expected result: 401 Unauthorized)
866 //
867 //   3. Valid trash list from the data manager
868 //      (expected result: 200 OK with request body "Received 3 trash
869 //      requests")
870 //
871 //   4. Invalid trash list from the data manager
872 //      (expected result: 400 Bad Request)
873 //
874 // Test that in the end, the trash collector received a good trash
875 // list with the expected number of requests.
876 //
877 // TODO(twp): test concurrency: launch 100 goroutines to update the
878 // trash list simultaneously.  Make sure that none of them return 400
879 // Bad Request and that replica.Dump() returns a valid list.
880 //
881 func (s *HandlerSuite) TestTrashHandler(c *check.C) {
882         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
883         // Replace the router's trashq -- which the worker goroutines
884         // started by setup() are now receiving from -- with a new
885         // one, so we can see what the handler sends to it.
886         trashq := NewWorkQueue()
887         s.handler.Handler.(*router).trashq = trashq
888
889         var userToken = "USER TOKEN"
890         s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
891
892         goodJSON := []byte(`[
893                 {
894                         "locator":"block1",
895                         "block_mtime":1409082153
896                 },
897                 {
898                         "locator":"block2",
899                         "block_mtime":1409082153
900                 },
901                 {
902                         "locator":"block3",
903                         "block_mtime":1409082153
904                 }
905         ]`)
906
907         badJSON := []byte(`I am not a valid JSON string`)
908
909         type trashTest struct {
910                 name         string
911                 req          RequestTester
912                 responseCode int
913                 responseBody string
914         }
915
916         var testcases = []trashTest{
917                 {
918                         "Valid trash list from an ordinary user",
919                         RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
920                         http.StatusUnauthorized,
921                         "Unauthorized\n",
922                 },
923                 {
924                         "Invalid trash list from an ordinary user",
925                         RequestTester{"/trash", userToken, "PUT", badJSON, ""},
926                         http.StatusUnauthorized,
927                         "Unauthorized\n",
928                 },
929                 {
930                         "Valid trash list from the data manager",
931                         RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
932                         http.StatusOK,
933                         "Received 3 trash requests\n",
934                 },
935                 {
936                         "Invalid trash list from the data manager",
937                         RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
938                         http.StatusBadRequest,
939                         "",
940                 },
941         }
942
943         for _, tst := range testcases {
944                 response := IssueRequest(s.handler, &tst.req)
945                 ExpectStatusCode(c, tst.name, tst.responseCode, response)
946                 ExpectBody(c, tst.name, tst.responseBody, response)
947         }
948
949         // The trash collector should have received one good list with 3
950         // requests on it.
951         for i := 0; i < 3; i++ {
952                 item := <-trashq.NextItem
953                 if _, ok := item.(TrashRequest); !ok {
954                         c.Errorf("item %v could not be parsed as a TrashRequest", item)
955                 }
956         }
957
958         expectChannelEmpty(c, trashq.NextItem)
959 }
960
961 // ====================
962 // Helper functions
963 // ====================
964
965 // IssueTestRequest executes an HTTP request described by rt, to a
966 // REST router.  It returns the HTTP response to the request.
967 func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
968         response := httptest.NewRecorder()
969         body := bytes.NewReader(rt.requestBody)
970         req, _ := http.NewRequest(rt.method, rt.uri, body)
971         if rt.apiToken != "" {
972                 req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
973         }
974         if rt.storageClasses != "" {
975                 req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
976         }
977         handler.ServeHTTP(response, req)
978         return response
979 }
980
981 func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
982         response := httptest.NewRecorder()
983         body := bytes.NewReader(rt.requestBody)
984         req, _ := http.NewRequest(rt.method, rt.uri, body)
985         if rt.apiToken != "" {
986                 req.Header.Set("Authorization", "Bearer "+rt.apiToken)
987         }
988         handler.ServeHTTP(response, req)
989         return response
990 }
991
992 // ExpectStatusCode checks whether a response has the specified status code,
993 // and reports a test failure if not.
994 func ExpectStatusCode(
995         c *check.C,
996         testname string,
997         expectedStatus int,
998         response *httptest.ResponseRecorder) {
999         c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
1000 }
1001
1002 func ExpectBody(
1003         c *check.C,
1004         testname string,
1005         expectedBody string,
1006         response *httptest.ResponseRecorder) {
1007         if expectedBody != "" && response.Body.String() != expectedBody {
1008                 c.Errorf("%s: expected response body '%s', got %+v",
1009                         testname, expectedBody, response)
1010         }
1011 }
1012
1013 // See #7121
1014 func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
1015         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1016
1017         defer func(orig *bufferPool) {
1018                 bufs = orig
1019         }(bufs)
1020         bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
1021
1022         ok := make(chan struct{})
1023         go func() {
1024                 for i := 0; i < 2; i++ {
1025                         response := IssueRequest(s.handler,
1026                                 &RequestTester{
1027                                         method:      "PUT",
1028                                         uri:         "/" + TestHash,
1029                                         requestBody: TestBlock,
1030                                 })
1031                         ExpectStatusCode(c,
1032                                 "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
1033                 }
1034                 ok <- struct{}{}
1035         }()
1036
1037         select {
1038         case <-ok:
1039         case <-time.After(time.Second):
1040                 c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
1041         }
1042 }
1043
1044 // Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
1045 // leak.
1046 func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
1047         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1048
1049         ok := make(chan bool)
1050         go func() {
1051                 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
1052                         // Unauthenticated request, no server key
1053                         // => OK (unsigned response)
1054                         unsignedLocator := "/" + TestHash
1055                         response := IssueRequest(s.handler,
1056                                 &RequestTester{
1057                                         method:      "PUT",
1058                                         uri:         unsignedLocator,
1059                                         requestBody: TestBlock,
1060                                 })
1061                         ExpectStatusCode(c,
1062                                 "TestPutHandlerBufferleak", http.StatusOK, response)
1063                         ExpectBody(c,
1064                                 "TestPutHandlerBufferleak",
1065                                 TestHashPutResp, response)
1066                 }
1067                 ok <- true
1068         }()
1069         select {
1070         case <-time.After(20 * time.Second):
1071                 // If the buffer pool leaks, the test goroutine hangs.
1072                 c.Fatal("test did not finish, assuming pool leaked")
1073         case <-ok:
1074         }
1075 }
1076
// notifyingResponseRecorder wraps httptest.ResponseRecorder with a
// closer channel so it can satisfy http.CloseNotifier and simulate a
// client disconnecting mid-request.
type notifyingResponseRecorder struct {
	*httptest.ResponseRecorder
	closer chan bool
}
1081
// CloseNotify implements http.CloseNotifier; a value sent on the
// closer channel tells the handler under test that the client has
// gone away.
func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
	return r.closer
}
1085
// TestGetHandlerClientDisconnect checks that a GET whose client has
// already disconnected gives up (503) without reading from any
// volume.
func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
	s.cluster.Collections.BlobSigning = false
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Shrink the buffer pool to one buffer, and keep that buffer
	// checked out until the test ends (the Get happens now, the
	// Put at function exit), so a GET that tried to read data
	// would block waiting for a buffer.
	defer func(orig *bufferPool) {
		bufs = orig
	}(bufs)
	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
	defer bufs.Put(bufs.Get(BlockSize))

	if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
		c.Error(err)
	}

	resp := &notifyingResponseRecorder{
		ResponseRecorder: httptest.NewRecorder(),
		closer:           make(chan bool, 1),
	}
	if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
		c.Fatal("notifyingResponseRecorder is broken")
	}
	// If anyone asks, the client has disconnected.
	resp.closer <- true

	ok := make(chan struct{})
	go func() {
		req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
		s.handler.ServeHTTP(resp, req)
		ok <- struct{}{}
	}()

	// The handler should return promptly; a long wait means it did
	// not notice the disconnect and is blocked on the buffer pool.
	select {
	case <-time.After(20 * time.Second):
		c.Fatal("request took >20s, close notifier must be broken")
	case <-ok:
	}

	// Expect 503 and zero GET calls on every volume.
	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
	for i, v := range s.handler.volmgr.AllWritable() {
		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
			c.Errorf("volume %d got %d calls, expected 0", i, calls)
		}
	}
}
1130
1131 // Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
1132 // leak.
1133 func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
1134         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1135
1136         vols := s.handler.volmgr.AllWritable()
1137         if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
1138                 c.Error(err)
1139         }
1140
1141         ok := make(chan bool)
1142         go func() {
1143                 for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
1144                         // Unauthenticated request, unsigned locator
1145                         // => OK
1146                         unsignedLocator := "/" + TestHash
1147                         response := IssueRequest(s.handler,
1148                                 &RequestTester{
1149                                         method: "GET",
1150                                         uri:    unsignedLocator,
1151                                 })
1152                         ExpectStatusCode(c,
1153                                 "Unauthenticated request, unsigned locator", http.StatusOK, response)
1154                         ExpectBody(c,
1155                                 "Unauthenticated request, unsigned locator",
1156                                 string(TestBlock),
1157                                 response)
1158                 }
1159                 ok <- true
1160         }()
1161         select {
1162         case <-time.After(20 * time.Second):
1163                 // If the buffer pool leaks, the test goroutine hangs.
1164                 c.Fatal("test did not finish, assuming pool leaked")
1165         case <-ok:
1166         }
1167 }
1168
1169 func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
1170         s.cluster.Volumes = map[string]arvados.Volume{
1171                 "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
1172                 "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
1173                 "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
1174         }
1175         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1176         rt := RequestTester{
1177                 method:      "PUT",
1178                 uri:         "/" + TestHash,
1179                 requestBody: TestBlock,
1180         }
1181
1182         for _, trial := range []struct {
1183                 ask    string
1184                 expect string
1185         }{
1186                 {"", ""},
1187                 {"default", "default=1"},
1188                 {" , default , default , ", "default=1"},
1189                 {"special", "extra=1, special=1"},
1190                 {"special, readonly", "extra=1, special=1"},
1191                 {"special, nonexistent", "extra=1, special=1"},
1192                 {"extra, special", "extra=1, special=1"},
1193                 {"default, special", "default=1, extra=1, special=1"},
1194         } {
1195                 c.Logf("success case %#v", trial)
1196                 rt.storageClasses = trial.ask
1197                 resp := IssueRequest(s.handler, &rt)
1198                 if trial.expect == "" {
1199                         // any non-empty value is correct
1200                         c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
1201                 } else {
1202                         c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
1203                 }
1204         }
1205
1206         for _, trial := range []struct {
1207                 ask string
1208         }{
1209                 {"doesnotexist"},
1210                 {"doesnotexist, readonly"},
1211                 {"readonly"},
1212         } {
1213                 c.Logf("failure case %#v", trial)
1214                 rt.storageClasses = trial.ask
1215                 resp := IssueRequest(s.handler, &rt)
1216                 c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
1217         }
1218 }
1219
// sortCommaSeparated returns s -- a ", "-separated list -- with its
// elements sorted lexicographically.
func sortCommaSeparated(s string) string {
	parts := strings.Split(s, ", ")
	sort.Sort(sort.StringSlice(parts))
	return strings.Join(parts, ", ")
}
1225
1226 func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
1227         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1228
1229         resp := IssueRequest(s.handler, &RequestTester{
1230                 method:      "PUT",
1231                 uri:         "/" + TestHash,
1232                 requestBody: TestBlock,
1233         })
1234         c.Logf("%#v", resp)
1235         c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
1236         c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
1237 }
1238
// TestUntrashHandler verifies access control and input validation on
// PUT /untrash/{hash}: only the data manager token may untrash, the
// locator must be well-formed, only the PUT method is accepted, and a
// successful request reports which volumes were untrashed.
func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

	// Set up Keep volumes
	vols := s.handler.volmgr.AllWritable()
	vols[0].Put(context.Background(), TestHash, TestBlock)

	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

	// unauthenticatedReq => UnauthorizedError
	unauthenticatedReq := &RequestTester{
		method: "PUT",
		uri:    "/untrash/" + TestHash,
	}
	response := IssueRequest(s.handler, unauthenticatedReq)
	ExpectStatusCode(c,
		"Unauthenticated request",
		UnauthorizedError.HTTPCode,
		response)

	// notDataManagerReq => UnauthorizedError
	notDataManagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: knownToken,
	}

	response = IssueRequest(s.handler, notDataManagerReq)
	ExpectStatusCode(c,
		"Non-datamanager token",
		UnauthorizedError.HTTPCode,
		response)

	// datamanagerWithBadHashReq => StatusBadRequest
	datamanagerWithBadHashReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/thisisnotalocator",
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
	ExpectStatusCode(c,
		"Bad locator in untrash request",
		http.StatusBadRequest,
		response)

	// datamanagerWrongMethodReq => StatusMethodNotAllowed
	datamanagerWrongMethodReq := &RequestTester{
		method:   "GET",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
	ExpectStatusCode(c,
		"Only PUT method is supported for untrash",
		http.StatusMethodNotAllowed,
		response)

	// datamanagerReq => StatusOK
	datamanagerReq := &RequestTester{
		method:   "PUT",
		uri:      "/untrash/" + TestHash,
		apiToken: s.cluster.SystemRootToken,
	}
	response = IssueRequest(s.handler, datamanagerReq)
	ExpectStatusCode(c,
		"",
		http.StatusOK,
		response)
	// Both mock volumes report success: untrash is attempted on
	// every writable volume, not just where the block was stored.
	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
}
1309
1310 func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
1311         // Change all volumes to read-only
1312         for uuid, v := range s.cluster.Volumes {
1313                 v.ReadOnly = true
1314                 s.cluster.Volumes[uuid] = v
1315         }
1316         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1317
1318         // datamanagerReq => StatusOK
1319         datamanagerReq := &RequestTester{
1320                 method:   "PUT",
1321                 uri:      "/untrash/" + TestHash,
1322                 apiToken: s.cluster.SystemRootToken,
1323         }
1324         response := IssueRequest(s.handler, datamanagerReq)
1325         ExpectStatusCode(c,
1326                 "No writable volumes",
1327                 http.StatusNotFound,
1328                 response)
1329 }
1330
1331 func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
1332         s.cluster.ManagementToken = arvadostest.ManagementToken
1333         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1334         pingReq := &RequestTester{
1335                 method:   "GET",
1336                 uri:      "/_health/ping",
1337                 apiToken: arvadostest.ManagementToken,
1338         }
1339         response := IssueHealthCheckRequest(s.handler, pingReq)
1340         ExpectStatusCode(c,
1341                 "",
1342                 http.StatusOK,
1343                 response)
1344         want := `{"health":"OK"}`
1345         if !strings.Contains(response.Body.String(), want) {
1346                 c.Errorf("expected response to include %s: got %s", want, response.Body.String())
1347         }
1348 }