// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Tests for Keep HTTP handlers:
//
//     GetBlockHandler
//     PutBlockHandler
//     IndexHandler
//
// The HTTP handlers are responsible for enforcing permission policy,
// so these tests must exercise all possible permission permutations.

package main

import (
        "bytes"
        "context"
        "encoding/json"
        "fmt"
        "net/http"
        "net/http/httptest"
        "os"
        "sort"
        "strings"
        "sync/atomic"
        "time"

        "git.arvados.org/arvados.git/lib/config"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "github.com/prometheus/client_golang/prometheus"
        check "gopkg.in/check.v1"
)

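// testServiceURL is a placeholder service address passed to the
// handler setup in these tests.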
var testServiceURL = func() arvados.URL {
        return arvados.URL{Host: "localhost:12345", Scheme: "http"}
}()

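// testCluster returns a minimal single-cluster ("zzzzz") config for
// these tests, with test tokens installed and blob signing disabled
// (individual tests enable it as needed).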
func testCluster(t TB) *arvados.Cluster {
        cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
        if err != nil {
                t.Fatal(err)
        }
        cluster, err := cfg.GetCluster("")
        if err != nil {
                t.Fatal(err)
        }
        cluster.SystemRootToken = arvadostest.SystemRootToken
        cluster.ManagementToken = arvadostest.ManagementToken
        cluster.Collections.BlobSigning = false
        return cluster
}

var _ = check.Suite(&HandlerSuite{})

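// HandlerSuite exercises the Keep HTTP handlers against mock volumes.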
type HandlerSuite struct {
        cluster *arvados.Cluster
        handler *handler
}

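// SetUpTest gives each test a fresh cluster config with two writable
// mock volumes and a fresh handler.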
func (s *HandlerSuite) SetUpTest(c *check.C) {
        s.cluster = testCluster(c)
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
                "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
        }
        s.handler = &handler{}
}

// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
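//
// A minimal usage sketch (IssueRequest is one of the helpers defined
// at the bottom of this file):
//
//      response := IssueRequest(s.handler, &RequestTester{
//              method:      "PUT",
//              uri:         "/" + TestHash,
//              requestBody: TestBlock,
//      })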
type RequestTester struct {
        uri            string
        apiToken       string
        method         string
        requestBody    []byte
        storageClasses string
}

// Test GetBlockHandler in the following situations:
//   - permissions off, unauthenticated request, unsigned locator
//   - permissions on, authenticated request, signed locator
//   - permissions on, authenticated request, unsigned locator
//   - permissions on, unauthenticated request, signed locator
//   - permissions on, authenticated request, expired locator
//   - permissions on, authenticated request, signed locator, transient error from backend
//
func (s *HandlerSuite) TestGetHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        vols := s.handler.volmgr.AllWritable()
        err := vols[0].Put(context.Background(), TestHash, TestBlock)
        c.Check(err, check.IsNil)

        // Create locators for testing.
        // Turn on permission settings so we can generate signed locators.
        s.cluster.Collections.BlobSigning = true
        s.cluster.Collections.BlobSigningKey = knownKey
        s.cluster.Collections.BlobSigningTTL.Set("5m")

        var (
                unsignedLocator  = "/" + TestHash
                validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
                expiredTimestamp = time.Now().Add(-time.Hour)
                signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
                expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
        )

        // -----------------
        // Test unauthenticated request with permissions off.
        s.cluster.Collections.BlobSigning = false

        // Unauthenticated request, unsigned locator
        // => OK
        response := IssueRequest(s.handler,
                &RequestTester{
                        method: "GET",
                        uri:    unsignedLocator,
                })
        ExpectStatusCode(c,
                "Unauthenticated request, unsigned locator", http.StatusOK, response)
        ExpectBody(c,
                "Unauthenticated request, unsigned locator",
                string(TestBlock),
                response)

        receivedLen := response.Header().Get("Content-Length")
        expectedLen := fmt.Sprintf("%d", len(TestBlock))
        if receivedLen != expectedLen {
                c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
        }

        // ----------------
        // Permissions: on.
        s.cluster.Collections.BlobSigning = true

        // Authenticated request, signed locator
        // => OK
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      signedLocator,
                apiToken: knownToken,
        })
        ExpectStatusCode(c,
                "Authenticated request, signed locator", http.StatusOK, response)
        ExpectBody(c,
                "Authenticated request, signed locator", string(TestBlock), response)

        receivedLen = response.Header().Get("Content-Length")
        expectedLen = fmt.Sprintf("%d", len(TestBlock))
        if receivedLen != expectedLen {
                c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
        }

        // Authenticated request, unsigned locator
        // => PermissionError
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      unsignedLocator,
                apiToken: knownToken,
        })
        ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)

        // Unauthenticated request, signed locator
        // => PermissionError
        response = IssueRequest(s.handler, &RequestTester{
                method: "GET",
                uri:    signedLocator,
        })
        ExpectStatusCode(c,
                "Unauthenticated request, signed locator",
                PermissionError.HTTPCode, response)

        // Authenticated request, expired locator
        // => ExpiredError
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      expiredLocator,
                apiToken: knownToken,
        })
        ExpectStatusCode(c,
                "Authenticated request, expired locator",
                ExpiredError.HTTPCode, response)

        // Authenticated request, signed locator
        // => 503 Server busy (transient error)

        // Set up the block owning volume to respond with errors
        vols[0].Volume.(*MockVolume).Bad = true
        vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      signedLocator,
                apiToken: knownToken,
        })
        // A transient error from one volume while the other doesn't find the block
        // should make the service return a 503 so that clients can retry.
        ExpectStatusCode(c,
                "Volume backend busy",
                503, response)
}

// Test PutBlockHandler in the following situations:
//   - no server key
//   - with server key, authenticated request, unsigned locator
//   - with server key, unauthenticated request, unsigned locator
//
func (s *HandlerSuite) TestPutHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // --------------
        // No server key.

        s.cluster.Collections.BlobSigningKey = ""

        // Unauthenticated request, no server key
        // => OK (unsigned response)
        unsignedLocator := "/" + TestHash
        response := IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         unsignedLocator,
                        requestBody: TestBlock,
                })

        ExpectStatusCode(c,
                "Unauthenticated request, no server key", http.StatusOK, response)
        ExpectBody(c,
                "Unauthenticated request, no server key",
                TestHashPutResp, response)

        // ------------------
        // With a server key.

        s.cluster.Collections.BlobSigningKey = knownKey
        s.cluster.Collections.BlobSigningTTL.Set("5m")

        // When a permission key is available, the locator returned
        // from an authenticated PUT request will be signed.

        // Authenticated PUT, signed locator
        // => OK (signed response)
        response = IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         unsignedLocator,
                        requestBody: TestBlock,
                        apiToken:    knownToken,
                })

        ExpectStatusCode(c,
                "Authenticated PUT, signed locator, with server key",
                http.StatusOK, response)
        responseLocator := strings.TrimSpace(response.Body.String())
        if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
                c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
                        "response '%s' does not contain a valid signature",
                        responseLocator)
        }

        // Unauthenticated PUT, unsigned locator
        // => OK
        response = IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         unsignedLocator,
                        requestBody: TestBlock,
                })

        ExpectStatusCode(c,
                "Unauthenticated PUT, unsigned locator, with server key",
                http.StatusOK, response)
        ExpectBody(c,
                "Unauthenticated PUT, unsigned locator, with server key",
                TestHashPutResp, response)
}

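// TestPutAndDeleteSkipReadonlyVolumes verifies that PUT and DELETE
// requests are only dispatched to writable volumes: the read-only
// volume should see no backend calls at all.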
func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
        s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        s.cluster.SystemRootToken = "fake-data-manager-token"
        IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         "/" + TestHash,
                        requestBody: TestBlock,
                })

        s.cluster.Collections.BlobTrash = true
        IssueRequest(s.handler,
                &RequestTester{
                        method:      "DELETE",
                        uri:         "/" + TestHash,
                        requestBody: TestBlock,
                        apiToken:    s.cluster.SystemRootToken,
                })
        type expect struct {
                volid     string
                method    string
                callcount int
        }
        for _, e := range []expect{
                {"zzzzz-nyw5e-000000000000000", "Get", 0},
                {"zzzzz-nyw5e-000000000000000", "Compare", 0},
                {"zzzzz-nyw5e-000000000000000", "Touch", 0},
                {"zzzzz-nyw5e-000000000000000", "Put", 0},
                {"zzzzz-nyw5e-000000000000000", "Delete", 0},
                {"zzzzz-nyw5e-111111111111111", "Get", 0},
                {"zzzzz-nyw5e-111111111111111", "Compare", 1},
                {"zzzzz-nyw5e-111111111111111", "Touch", 1},
                {"zzzzz-nyw5e-111111111111111", "Put", 1},
                {"zzzzz-nyw5e-111111111111111", "Delete", 1},
        } {
                if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
                        c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
                }
        }
}

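// TestReadsOrderedByStorageClassPriority verifies that GET tries
// volumes in order of descending storage class priority, breaking
// ties by volume UUID.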
func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-111111111111111": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class1": true}},
                "zzzzz-nyw5e-222222222222222": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class2": true, "class3": true}},
        }

        for _, trial := range []struct {
                priority1 int // priority of class1, thus vol1
                priority2 int // priority of class2
                priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
                get1      int // expected number of "get" ops on vol1
                get2      int // expected number of "get" ops on vol2
        }{
                {100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
                {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
                {66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
                {66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
        } {
                c.Logf("%+v", trial)
                s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
                        "class1": {Priority: trial.priority1},
                        "class2": {Priority: trial.priority2},
                        "class3": {Priority: trial.priority3},
                }
                c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
                IssueRequest(s.handler,
                        &RequestTester{
                                method:         "PUT",
                                uri:            "/" + TestHash,
                                requestBody:    TestBlock,
                                storageClasses: "class1",
                        })
                IssueRequest(s.handler,
                        &RequestTester{
                                method: "GET",
                                uri:    "/" + TestHash,
                        })
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
        }
}

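// TestPutWithNoWritableVolumes verifies that a PUT returns FullError
// without attempting any backend writes when the only volume matching
// the requested storage class is read-only.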
func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-111111111111111": {
                        Driver:         "mock",
                        Replication:    1,
                        ReadOnly:       true,
                        StorageClasses: map[string]bool{"class1": true}},
        }
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        resp := IssueRequest(s.handler,
                &RequestTester{
                        method:         "PUT",
                        uri:            "/" + TestHash,
                        requestBody:    TestBlock,
                        storageClasses: "class1",
                })
        c.Check(resp.Code, check.Equals, FullError.HTTPCode)
        c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
}

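// TestConcurrentWritesToMultipleStorageClasses verifies how PUT
// requests distribute Put and Compare calls across volumes when
// multiple storage classes are requested.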
func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-111111111111111": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class1": true}},
                "zzzzz-nyw5e-121212121212121": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class1": true, "class2": true}},
                "zzzzz-nyw5e-222222222222222": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class2": true}},
        }

        for _, trial := range []struct {
                setCounter uint32 // value to stuff vm.counter, to control offset
                classes    string // desired classes
                put111     int    // expected number of "put" ops on 11111... after 2x put reqs
                put121     int    // expected number of "put" ops on 12121...
                put222     int    // expected number of "put" ops on 22222...
                cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
                cmp121     int    // expected number of "compare" ops on 12121...
                cmp222     int    // expected number of "compare" ops on 22222...
        }{
                {0, "class1",
                        1, 0, 0,
                        2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 121
                {0, "class2",
                        0, 1, 0,
                        0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
                {0, "class1,class2",
                        1, 1, 0,
                        2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
                {1, "class1,class2",
                        0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
                        2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
                {0, "class1,class2,class404",
                        1, 1, 0,
                        2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
        } {
                c.Logf("%+v", trial)
                s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
                        "class1": {},
                        "class2": {},
                        "class3": {},
                }
                c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
                atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
                for i := 0; i < 2; i++ {
                        IssueRequest(s.handler,
                                &RequestTester{
                                        method:         "PUT",
                                        uri:            "/" + TestHash,
                                        requestBody:    TestBlock,
                                        storageClasses: trial.classes,
                                })
                }
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
        }
}

// Test TOUCH requests.
func (s *HandlerSuite) TestTouchHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        vols := s.handler.volmgr.AllWritable()
        vols[0].Put(context.Background(), TestHash, TestBlock)
        vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
        afterPut := time.Now()
        t, err := vols[0].Mtime(TestHash)
        c.Assert(err, check.IsNil)
        c.Assert(t.Before(afterPut), check.Equals, true)

        ExpectStatusCode(c,
                "touch with no credentials",
                http.StatusUnauthorized,
                IssueRequest(s.handler, &RequestTester{
                        method: "TOUCH",
                        uri:    "/" + TestHash,
                }))

        ExpectStatusCode(c,
                "touch with non-root credentials",
                http.StatusUnauthorized,
                IssueRequest(s.handler, &RequestTester{
                        method:   "TOUCH",
                        uri:      "/" + TestHash,
                        apiToken: arvadostest.ActiveTokenV2,
                }))

        ExpectStatusCode(c,
                "touch non-existent block",
                http.StatusNotFound,
                IssueRequest(s.handler, &RequestTester{
                        method:   "TOUCH",
                        uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        apiToken: s.cluster.SystemRootToken,
                }))

        beforeTouch := time.Now()
        ExpectStatusCode(c,
                "touch block",
                http.StatusOK,
                IssueRequest(s.handler, &RequestTester{
                        method:   "TOUCH",
                        uri:      "/" + TestHash,
                        apiToken: s.cluster.SystemRootToken,
                }))
        t, err = vols[0].Mtime(TestHash)
        c.Assert(err, check.IsNil)
        c.Assert(t.After(beforeTouch), check.Equals, true)
}

// Test /index requests:
//   - unauthenticated /index request
//   - unauthenticated /index/prefix request
//   - authenticated   /index request        | non-superuser
//   - authenticated   /index/prefix request | non-superuser
//   - authenticated   /index request        | superuser
//   - authenticated   /index/prefix request | superuser
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
//
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // Include multiple blocks on different volumes, and
        // some metadata files (which should be omitted from index listings)
        vols := s.handler.volmgr.AllWritable()
        vols[0].Put(context.Background(), TestHash, TestBlock)
        vols[1].Put(context.Background(), TestHash2, TestBlock2)
        vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
        vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))

        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        unauthenticatedReq := &RequestTester{
                method: "GET",
                uri:    "/index",
        }
        authenticatedReq := &RequestTester{
                method:   "GET",
                uri:      "/index",
                apiToken: knownToken,
        }
        superuserReq := &RequestTester{
                method:   "GET",
                uri:      "/index",
                apiToken: s.cluster.SystemRootToken,
        }
        unauthPrefixReq := &RequestTester{
                method: "GET",
                uri:    "/index/" + TestHash[0:3],
        }
        authPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/" + TestHash[0:3],
                apiToken: knownToken,
        }
        superuserPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/" + TestHash[0:3],
                apiToken: s.cluster.SystemRootToken,
        }
        superuserNoSuchPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/abcd",
                apiToken: s.cluster.SystemRootToken,
        }
        superuserInvalidPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/xyz",
                apiToken: s.cluster.SystemRootToken,
        }

        // -------------------------------------------------------------
        // Only the superuser should be allowed to issue /index requests.

        // ---------------------------
        // BlobSigning enabled
        // This setting should not affect tests passing.
        s.cluster.Collections.BlobSigning = true

        // unauthenticated /index request
        // => UnauthorizedError
        response := IssueRequest(s.handler, unauthenticatedReq)
        ExpectStatusCode(c,
                "permissions on, unauthenticated request",
                UnauthorizedError.HTTPCode,
                response)

        // unauthenticated /index/prefix request
        // => UnauthorizedError
        response = IssueRequest(s.handler, unauthPrefixReq)
        ExpectStatusCode(c,
                "permissions on, unauthenticated /index/prefix request",
                UnauthorizedError.HTTPCode,
                response)

        // authenticated /index request, non-superuser
        // => UnauthorizedError
        response = IssueRequest(s.handler, authenticatedReq)
        ExpectStatusCode(c,
                "permissions on, authenticated request, non-superuser",
                UnauthorizedError.HTTPCode,
                response)

        // authenticated /index/prefix request, non-superuser
        // => UnauthorizedError
        response = IssueRequest(s.handler, authPrefixReq)
        ExpectStatusCode(c,
                "permissions on, authenticated /index/prefix request, non-superuser",
                UnauthorizedError.HTTPCode,
                response)

        // superuser /index request
        // => OK
        response = IssueRequest(s.handler, superuserReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        // ----------------------------
        // BlobSigning disabled
        // Valid Request should still pass.
        s.cluster.Collections.BlobSigning = false

        // superuser /index request
        // => OK
        response = IssueRequest(s.handler, superuserReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        expected := `^` + TestHash + `\+\d+ \d+\n` +
                TestHash2 + `\+\d+ \d+\n\n$`
        c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
                "permissions on, superuser request"))

        // superuser /index/prefix request
        // => OK
        response = IssueRequest(s.handler, superuserPrefixReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        expected = `^` + TestHash + `\+\d+ \d+\n\n$`
        c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
                "permissions on, superuser /index/prefix request"))

        // superuser /index/{no-such-prefix} request
        // => OK
        response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        if "\n" != response.Body.String() {
                c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
        }

        // superuser /index/{invalid-prefix} request
        // => StatusBadRequest
        response = IssueRequest(s.handler, superuserInvalidPrefixReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusBadRequest,
                response)
}

// TestDeleteHandler
//
// Cases tested:
//
//   With no token and with a non-data-manager token:
//   * Delete existing block
//     (test for 403 Forbidden, confirm block not deleted)
//
//   With data manager token:
//
//   * Delete existing block
//     (test for 200 OK, response counts, confirm block deleted)
//
//   * Delete nonexistent block
//     (test for 200 OK, response counts)
//
//   TODO(twp):
//
//   * Delete block on read-only and read-write volume
//     (test for 200 OK, response with copies_deleted=1,
//     copies_failed=1, confirm block deleted only on r/w volume)
//
//   * Delete block on read-only volume only
//     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
//     confirm block not deleted)
//
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        vols := s.handler.volmgr.AllWritable()
        vols[0].Put(context.Background(), TestHash, TestBlock)

        // Explicitly set the BlobSigningTTL to 0 for these
        // tests, to ensure the MockVolume deletes the blocks
        // even though they have just been created.
        s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)

        var userToken = "NOT DATA MANAGER TOKEN"
        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        s.cluster.Collections.BlobTrash = true

        unauthReq := &RequestTester{
                method: "DELETE",
                uri:    "/" + TestHash,
        }

        userReq := &RequestTester{
                method:   "DELETE",
                uri:      "/" + TestHash,
                apiToken: userToken,
        }

        superuserExistingBlockReq := &RequestTester{
                method:   "DELETE",
                uri:      "/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }

        superuserNonexistentBlockReq := &RequestTester{
                method:   "DELETE",
                uri:      "/" + TestHash2,
                apiToken: s.cluster.SystemRootToken,
        }

        // Unauthenticated request returns PermissionError.
        var response *httptest.ResponseRecorder
        response = IssueRequest(s.handler, unauthReq)
        ExpectStatusCode(c,
                "unauthenticated request",
                PermissionError.HTTPCode,
                response)

        // Authenticated non-admin request returns PermissionError.
        response = IssueRequest(s.handler, userReq)
        ExpectStatusCode(c,
                "authenticated non-admin request",
                PermissionError.HTTPCode,
                response)

        // Authenticated admin request for nonexistent block.
        type deletecounter struct {
                Deleted int `json:"copies_deleted"`
                Failed  int `json:"copies_failed"`
        }
        var responseDc, expectedDc deletecounter

        response = IssueRequest(s.handler, superuserNonexistentBlockReq)
        ExpectStatusCode(c,
                "data manager request, nonexistent block",
                http.StatusNotFound,
                response)

        // Authenticated admin request for existing block while BlobTrash is false.
        s.cluster.Collections.BlobTrash = false
        response = IssueRequest(s.handler, superuserExistingBlockReq)
        ExpectStatusCode(c,
                "authenticated request, existing block, method disabled",
                MethodDisabledError.HTTPCode,
                response)
        s.cluster.Collections.BlobTrash = true

        // Authenticated admin request for existing block.
        response = IssueRequest(s.handler, superuserExistingBlockReq)
        ExpectStatusCode(c,
                "data manager request, existing block",
                http.StatusOK,
                response)
        // Expect response {"copies_deleted":1,"copies_failed":0}
        expectedDc = deletecounter{1, 0}
        json.NewDecoder(response.Body).Decode(&responseDc)
        if responseDc != expectedDc {
                c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
                        expectedDc, responseDc)
        }
        // Confirm the block has been deleted
        buf := make([]byte, BlockSize)
        _, err := vols[0].Get(context.Background(), TestHash, buf)
        var blockDeleted = os.IsNotExist(err)
        if !blockDeleted {
                c.Error("superuserExistingBlockReq: block not deleted")
        }

        // A DELETE request on a block newer than BlobSigningTTL
        // should return success but leave the block on the volume.
        vols[0].Put(context.Background(), TestHash, TestBlock)
        s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)

        response = IssueRequest(s.handler, superuserExistingBlockReq)
        ExpectStatusCode(c,
                "data manager request, existing block",
                http.StatusOK,
                response)
        // Expect response {"copies_deleted":1,"copies_failed":0}
        expectedDc = deletecounter{1, 0}
        json.NewDecoder(response.Body).Decode(&responseDc)
        if responseDc != expectedDc {
                c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
                        expectedDc, responseDc)
        }
        // Confirm the block has NOT been deleted.
        _, err = vols[0].Get(context.Background(), TestHash, buf)
        if err != nil {
                c.Errorf("testing delete on new block: %s\n", err)
        }
}

// TestPullHandler
//
// Test handling of the PUT /pull request.
//
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
//   1. Valid pull list from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   2. Invalid pull request from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   3. Valid pull request from the data manager
//      (expected result: 200 OK with request body "Received 3 pull
//      requests")
//
//   4. Invalid pull request from the data manager
//      (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously.  Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
//
func (s *HandlerSuite) TestPullHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // Replace the router's pullq -- which the worker goroutines
        // started by setup() are now receiving from -- with a new
        // one, so we can see what the handler sends to it.
        pullq := NewWorkQueue()
        s.handler.Handler.(*router).pullq = pullq

        var userToken = "USER TOKEN"
        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        goodJSON := []byte(`[
                {
                        "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
                        "servers":[
                                "http://server1",
                                "http://server2"
                        ]
                },
                {
                        "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
                        "servers":[]
                },
                {
                        "locator":"cccccccccccccccccccccccccccccccc+12345",
                        "servers":["http://server1"]
                }
        ]`)

        badJSON := []byte(`{ "key":"I'm a little teapot" }`)

        type pullTest struct {
                name         string
                req          RequestTester
                responseCode int
                responseBody string
        }
        var testcases = []pullTest{
                {
                        "Valid pull list from an ordinary user",
                        RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Invalid pull request from an ordinary user",
                        RequestTester{"/pull", userToken, "PUT", badJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Valid pull request from the data manager",
                        RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
                        http.StatusOK,
                        "Received 3 pull requests\n",
                },
                {
                        "Invalid pull request from the data manager",
                        RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
                        http.StatusBadRequest,
                        "",
                },
        }

        for _, tst := range testcases {
                response := IssueRequest(s.handler, &tst.req)
                ExpectStatusCode(c, tst.name, tst.responseCode, response)
                ExpectBody(c, tst.name, tst.responseBody, response)
        }

        // The Keep pull manager should have received one good list with 3
        // requests on it.
        for i := 0; i < 3; i++ {
                var item interface{}
                select {
                case item = <-pullq.NextItem:
                case <-time.After(time.Second):
                        c.Error("timed out")
                }
                if _, ok := item.(PullRequest); !ok {
                        c.Errorf("item %v could not be parsed as a PullRequest", item)
                }
        }

        expectChannelEmpty(c, pullq.NextItem)
}

// TestTrashHandler
//
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
//   1. Valid trash list from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   2. Invalid trash list from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   3. Valid trash list from the data manager
//      (expected result: 200 OK with request body "Received 3 trash
//      requests")
//
//   4. Invalid trash list from the data manager
//      (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good trash
// list with the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously.  Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
//
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        // Replace the router's trashq -- which the worker goroutines
        // started by setup() are now receiving from -- with a new
        // one, so we can see what the handler sends to it.
        trashq := NewWorkQueue()
        s.handler.Handler.(*router).trashq = trashq

        var userToken = "USER TOKEN"
        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        goodJSON := []byte(`[
                {
                        "locator":"block1",
                        "block_mtime":1409082153
                },
                {
                        "locator":"block2",
                        "block_mtime":1409082153
                },
                {
                        "locator":"block3",
                        "block_mtime":1409082153
                }
        ]`)

        badJSON := []byte(`I am not a valid JSON string`)

        type trashTest struct {
                name         string
                req          RequestTester
                responseCode int
                responseBody string
        }

        var testcases = []trashTest{
                {
                        "Valid trash list from an ordinary user",
                        RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Invalid trash list from an ordinary user",
                        RequestTester{"/trash", userToken, "PUT", badJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Valid trash list from the data manager",
                        RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
                        http.StatusOK,
                        "Received 3 trash requests\n",
                },
                {
                        "Invalid trash list from the data manager",
                        RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
                        http.StatusBadRequest,
                        "",
                },
        }

        for _, tst := range testcases {
                response := IssueRequest(s.handler, &tst.req)
                ExpectStatusCode(c, tst.name, tst.responseCode, response)
                ExpectBody(c, tst.name, tst.responseBody, response)
        }

        // The trash collector should have received one good list with 3
        // requests on it.
        for i := 0; i < 3; i++ {
                item := <-trashq.NextItem
                if _, ok := item.(TrashRequest); !ok {
                        c.Errorf("item %v could not be parsed as a TrashRequest", item)
                }
        }

        expectChannelEmpty(c, trashq.NextItem)
}

// ====================
// Helper functions
// ====================

// IssueRequest executes an HTTP request described by rt, to a
// REST router.  It returns the HTTP response to the request.
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
        response := httptest.NewRecorder()
        body := bytes.NewReader(rt.requestBody)
        req, _ := http.NewRequest(rt.method, rt.uri, body)
        if rt.apiToken != "" {
                req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
        }
        if rt.storageClasses != "" {
                req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
        }
        handler.ServeHTTP(response, req)
        return response
}

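// IssueHealthCheckRequest is like IssueRequest, but sends the API
// token as a "Bearer" credential rather than "OAuth2".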
func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
        response := httptest.NewRecorder()
        body := bytes.NewReader(rt.requestBody)
        req, _ := http.NewRequest(rt.method, rt.uri, body)
        if rt.apiToken != "" {
                req.Header.Set("Authorization", "Bearer "+rt.apiToken)
        }
        handler.ServeHTTP(response, req)
        return response
}

// ExpectStatusCode checks whether a response has the specified status code,
// and reports a test failure if not.
func ExpectStatusCode(
        c *check.C,
        testname string,
        expectedStatus int,
        response *httptest.ResponseRecorder) {
        c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
}

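// ExpectBody checks whether a response has the specified body, and
// reports a test failure if not. An empty expectedBody matches
// anything.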
func ExpectBody(
        c *check.C,
        testname string,
        expectedBody string,
        response *httptest.ResponseRecorder) {
        if expectedBody != "" && response.Body.String() != expectedBody {
                c.Errorf("%s: expected response body '%s', got %+v",
                        testname, expectedBody, response)
        }
}

// See #7121
func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        defer func(orig *bufferPool) {
                bufs = orig
        }(bufs)
        bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)

        ok := make(chan struct{})
        go func() {
                for i := 0; i < 2; i++ {
                        response := IssueRequest(s.handler,
                                &RequestTester{
                                        method:      "PUT",
                                        uri:         "/" + TestHash,
                                        requestBody: TestBlock,
                                })
                        ExpectStatusCode(c,
                                "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
                }
                ok <- struct{}{}
        }()

        select {
        case <-ok:
        case <-time.After(time.Second):
                c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
        }
}

// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
// leak.
func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        ok := make(chan bool)
        go func() {
                for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
                        // Unauthenticated request, no server key
                        // => OK (unsigned response)
                        unsignedLocator := "/" + TestHash
                        response := IssueRequest(s.handler,
                                &RequestTester{
                                        method:      "PUT",
                                        uri:         unsignedLocator,
                                        requestBody: TestBlock,
                                })
                        ExpectStatusCode(c,
                                "TestPutHandlerBufferleak", http.StatusOK, response)
                        ExpectBody(c,
                                "TestPutHandlerBufferleak",
                                TestHashPutResp, response)
                }
                ok <- true
        }()
        select {
        case <-time.After(20 * time.Second):
                // If the buffer pool leaks, the test goroutine hangs.
                c.Fatal("test did not finish, assuming pool leaked")
        case <-ok:
        }
}

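// notifyingResponseRecorder is an httptest.ResponseRecorder that
// also implements http.CloseNotifier, so tests can simulate a client
// disconnecting in the middle of a request.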
type notifyingResponseRecorder struct {
        *httptest.ResponseRecorder
        closer chan bool
}

func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
        return r.closer
}

func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
        s.cluster.Collections.BlobSigning = false
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        defer func(orig *bufferPool) {
                bufs = orig
        }(bufs)
        bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
        defer bufs.Put(bufs.Get(BlockSize))

        if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
                c.Error(err)
        }

        resp := &notifyingResponseRecorder{
                ResponseRecorder: httptest.NewRecorder(),
                closer:           make(chan bool, 1),
        }
        if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
                c.Fatal("notifyingResponseRecorder is broken")
        }
        // If anyone asks, the client has disconnected.
        resp.closer <- true

        ok := make(chan struct{})
        go func() {
                req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
                s.handler.ServeHTTP(resp, req)
                ok <- struct{}{}
        }()

        select {
        case <-time.After(20 * time.Second):
                c.Fatal("request took >20s, close notifier must be broken")
        case <-ok:
        }

        ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
        for i, v := range s.handler.volmgr.AllWritable() {
                if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
                        c.Errorf("volume %d got %d calls, expected 0", i, calls)
                }
        }
}

// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
// leak.
func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        vols := s.handler.volmgr.AllWritable()
        if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
                c.Error(err)
        }

        ok := make(chan bool)
        go func() {
                for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
                        // Unauthenticated request, unsigned locator
                        // => OK
                        unsignedLocator := "/" + TestHash
                        response := IssueRequest(s.handler,
                                &RequestTester{
                                        method: "GET",
                                        uri:    unsignedLocator,
                                })
                        ExpectStatusCode(c,
                                "Unauthenticated request, unsigned locator", http.StatusOK, response)
                        ExpectBody(c,
                                "Unauthenticated request, unsigned locator",
                                string(TestBlock),
                                response)
                }
                ok <- true
        }()
        select {
        case <-time.After(20 * time.Second):
                // If the buffer pool leaks, the test goroutine hangs.
                c.Fatal("test did not finish, assuming pool leaked")
        case <-ok:
        }
}

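// TestPutStorageClasses verifies the X-Keep-Storage-Classes-Confirmed
// header returned by PUT for various combinations of requested
// storage classes, including unsatisfiable ones.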
1249 func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
1250         s.cluster.Volumes = map[string]arvados.Volume{
1251                 "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
1252                 "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
1253                 "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
1254         }
1255         c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
1256         rt := RequestTester{
1257                 method:      "PUT",
1258                 uri:         "/" + TestHash,
1259                 requestBody: TestBlock,
1260         }
1261
        for _, trial := range []struct {
                ask    string
                expect string
        }{
                {"", ""},
                {"default", "default=1"},
                {" , default , default , ", "default=1"},
                {"special", "extra=1, special=1"},
                {"special, readonly", "extra=1, special=1"},
                {"special, nonexistent", "extra=1, special=1"},
                {"extra, special", "extra=1, special=1"},
                {"default, special", "default=1, extra=1, special=1"},
        } {
                c.Logf("success case %#v", trial)
                rt.storageClasses = trial.ask
                resp := IssueRequest(s.handler, &rt)
                if trial.expect == "" {
                        // any non-empty value is correct
                        c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
                } else {
                        c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
                }
        }

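        // Each failure case asks for a storage class that no writable
        // volume provides ("readonly" exists only on a read-only volume),
        // so the PUT cannot be satisfied.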
        for _, trial := range []struct {
                ask string
        }{
                {"doesnotexist"},
                {"doesnotexist, readonly"},
                {"readonly"},
        } {
                c.Logf("failure case %#v", trial)
                rt.storageClasses = trial.ask
                resp := IssueRequest(s.handler, &rt)
                c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
        }
}

func sortCommaSeparated(s string) string {
        slice := strings.Split(s, ", ")
        sort.Strings(slice)
        return strings.Join(slice, ", ")
}
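
// A quick sanity check for the helper itself (not part of the original
// suite; added as an illustrative example of its behavior):
func (s *HandlerSuite) TestSortCommaSeparated(c *check.C) {
        c.Check(sortCommaSeparated("special=1, extra=1"), check.Equals, "extra=1, special=1")
        c.Check(sortCommaSeparated("default=1"), check.Equals, "default=1")
}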

func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        resp := IssueRequest(s.handler, &RequestTester{
                method:      "PUT",
                uri:         "/" + TestHash,
                requestBody: TestBlock,
        })
        c.Logf("%#v", resp)
        c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
        c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
}
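
// A client consuming X-Keep-Storage-Classes-Confirmed might parse it
// into a map. A minimal sketch, assuming the "class=count, class=count"
// format asserted above (illustrative only; nothing in this file or the
// handler calls this function):
func parseConfirmedClasses(header string) map[string]int {
        confirmed := map[string]int{}
        for _, entry := range strings.Split(header, ", ") {
                parts := strings.SplitN(entry, "=", 2)
                if len(parts) != 2 {
                        continue
                }
                var n int
                if _, err := fmt.Sscanf(parts[1], "%d", &n); err == nil {
                        confirmed[parts[0]] = n
                }
        }
        return confirmed
}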

func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // Set up Keep volumes
        vols := s.handler.volmgr.AllWritable()
        err := vols[0].Put(context.Background(), TestHash, TestBlock)
        c.Check(err, check.IsNil)

        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        // unauthenticatedReq => UnauthorizedError
        unauthenticatedReq := &RequestTester{
                method: "PUT",
                uri:    "/untrash/" + TestHash,
        }
        response := IssueRequest(s.handler, unauthenticatedReq)
        ExpectStatusCode(c,
                "Unauthenticated request",
                UnauthorizedError.HTTPCode,
                response)

        // notDataManagerReq => UnauthorizedError
        notDataManagerReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/" + TestHash,
                apiToken: knownToken,
        }

        response = IssueRequest(s.handler, notDataManagerReq)
        ExpectStatusCode(c,
                "Non-datamanager token",
                UnauthorizedError.HTTPCode,
                response)

        // datamanagerWithBadHashReq => StatusBadRequest
        datamanagerWithBadHashReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/thisisnotalocator",
                apiToken: s.cluster.SystemRootToken,
        }
        response = IssueRequest(s.handler, datamanagerWithBadHashReq)
        ExpectStatusCode(c,
                "Bad locator in untrash request",
                http.StatusBadRequest,
                response)

        // datamanagerWrongMethodReq => StatusMethodNotAllowed
        datamanagerWrongMethodReq := &RequestTester{
                method:   "GET",
                uri:      "/untrash/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }
        response = IssueRequest(s.handler, datamanagerWrongMethodReq)
        ExpectStatusCode(c,
                "Only PUT method is supported for untrash",
                http.StatusMethodNotAllowed,
                response)

        // datamanagerReq => StatusOK
        datamanagerReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }
        response = IssueRequest(s.handler, datamanagerReq)
        ExpectStatusCode(c,
                "",
                http.StatusOK,
                response)
        c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
}

func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
        // Change all volumes to read-only
        for uuid, v := range s.cluster.Volumes {
                v.ReadOnly = true
                s.cluster.Volumes[uuid] = v
        }
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // datamanagerReq => StatusNotFound (no writable volumes)
        datamanagerReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }
        response := IssueRequest(s.handler, datamanagerReq)
        ExpectStatusCode(c,
                "No writable volumes",
                http.StatusNotFound,
                response)
}

func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
        s.cluster.ManagementToken = arvadostest.ManagementToken
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        pingReq := &RequestTester{
                method:   "GET",
                uri:      "/_health/ping",
                apiToken: arvadostest.ManagementToken,
        }
        response := IssueHealthCheckRequest(s.handler, pingReq)
        ExpectStatusCode(c,
                "",
                http.StatusOK,
                response)
        want := `{"health":"OK"}`
        if !strings.Contains(response.Body.String(), want) {
                c.Errorf("expected response to include %s: got %s", want, response.Body.String())
        }
}
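
// The strings.Contains check above is deliberately loose. A stricter
// variant could decode the JSON body instead; a sketch, assuming the
// flat {"health":"OK"} object asserted above (illustrative only, not
// called by the existing tests):
func checkHealthOK(c *check.C, body string) {
        var status struct {
                Health string `json:"health"`
        }
        c.Check(json.Unmarshal([]byte(body), &status), check.IsNil)
        c.Check(status.Health, check.Equals, "OK")
}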