// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// Tests for Keep HTTP handlers:
//
//     GetBlockHandler
//     PutBlockHandler
//     IndexHandler
//
// The HTTP handlers are responsible for enforcing permission policy,
// so these tests must exercise all possible permission permutations.

package main

import (
        "bytes"
        "context"
        "encoding/json"
        "fmt"
        "net/http"
        "net/http/httptest"
        "os"
        "sort"
        "strings"
        "sync/atomic"
        "time"

        "git.arvados.org/arvados.git/lib/config"
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "github.com/prometheus/client_golang/prometheus"
        check "gopkg.in/check.v1"
)

var testServiceURL = func() arvados.URL {
        return arvados.URL{Host: "localhost:12345", Scheme: "http"}
}()

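// testCluster returns a minimal one-cluster configuration for these
// tests, with known root and management tokens and blob signing
// disabled; SetUpTest adds the test volumes.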
func testCluster(t TB) *arvados.Cluster {
        cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
        if err != nil {
                t.Fatal(err)
        }
        cluster, err := cfg.GetCluster("")
        if err != nil {
                t.Fatal(err)
        }
        cluster.SystemRootToken = arvadostest.SystemRootToken
        cluster.ManagementToken = arvadostest.ManagementToken
        cluster.Collections.BlobSigning = false
        return cluster
}

var _ = check.Suite(&HandlerSuite{})

type HandlerSuite struct {
        cluster *arvados.Cluster
        handler *handler
}

func (s *HandlerSuite) SetUpTest(c *check.C) {
        s.cluster = testCluster(c)
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
                "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
        }
        s.handler = &handler{}
}

// A RequestTester represents the parameters for an HTTP request to
// be issued on behalf of a unit test.
type RequestTester struct {
        uri            string
        apiToken       string
        method         string
        requestBody    []byte
        storageClasses string
}

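// For example (an illustrative sketch; the tests below construct
// these inline), an authenticated GET request for TestHash would be
// described and issued as:
//
//	response := IssueRequest(s.handler, &RequestTester{
//	        method:   "GET",
//	        uri:      "/" + TestHash,
//	        apiToken: knownToken,
//	})
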
// Test GetBlockHandler on the following situations:
//   - permissions off, unauthenticated request, unsigned locator
//   - permissions on, authenticated request, signed locator
//   - permissions on, authenticated request, unsigned locator
//   - permissions on, unauthenticated request, signed locator
//   - permissions on, authenticated request, expired locator
//   - permissions on, authenticated request, signed locator, transient error from backend
//
func (s *HandlerSuite) TestGetHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        vols := s.handler.volmgr.AllWritable()
        err := vols[0].Put(context.Background(), TestHash, TestBlock)
        c.Check(err, check.IsNil)

        // Create locators for testing.
        // Turn on permission settings so we can generate signed locators.
        s.cluster.Collections.BlobSigning = true
        s.cluster.Collections.BlobSigningKey = knownKey
        s.cluster.Collections.BlobSigningTTL.Set("5m")

        var (
                unsignedLocator  = "/" + TestHash
                validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
                expiredTimestamp = time.Now().Add(-time.Hour)
                signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
                expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
        )

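        // For reference, a signed locator produced by SignLocator has
        // the general shape (a sketch, assuming the usual Keep blob
        // permission hint; the exact format is defined elsewhere):
        //
        //         <md5-hash>+A<40-hex-digit-signature>@<expiry-unix-timestamp-in-hex>
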
        // -----------------
        // Test unauthenticated request with permissions off.
        s.cluster.Collections.BlobSigning = false

        // Unauthenticated request, unsigned locator
        // => OK
        response := IssueRequest(s.handler,
                &RequestTester{
                        method: "GET",
                        uri:    unsignedLocator,
                })
        ExpectStatusCode(c,
                "Unauthenticated request, unsigned locator", http.StatusOK, response)
        ExpectBody(c,
                "Unauthenticated request, unsigned locator",
                string(TestBlock),
                response)

        receivedLen := response.Header().Get("Content-Length")
        expectedLen := fmt.Sprintf("%d", len(TestBlock))
        if receivedLen != expectedLen {
                c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
        }

        // ----------------
        // Permissions: on.
        s.cluster.Collections.BlobSigning = true

        // Authenticated request, signed locator
        // => OK
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      signedLocator,
                apiToken: knownToken,
        })
        ExpectStatusCode(c,
                "Authenticated request, signed locator", http.StatusOK, response)
        ExpectBody(c,
                "Authenticated request, signed locator", string(TestBlock), response)

        receivedLen = response.Header().Get("Content-Length")
        expectedLen = fmt.Sprintf("%d", len(TestBlock))
        if receivedLen != expectedLen {
                c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
        }

        // Authenticated request, unsigned locator
        // => PermissionError
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      unsignedLocator,
                apiToken: knownToken,
        })
        ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)

        // Unauthenticated request, signed locator
        // => PermissionError
        response = IssueRequest(s.handler, &RequestTester{
                method: "GET",
                uri:    signedLocator,
        })
        ExpectStatusCode(c,
                "Unauthenticated request, signed locator",
                PermissionError.HTTPCode, response)

        // Authenticated request, expired locator
        // => ExpiredError
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      expiredLocator,
                apiToken: knownToken,
        })
        ExpectStatusCode(c,
                "Authenticated request, expired locator",
                ExpiredError.HTTPCode, response)

        // Authenticated request, signed locator
        // => 503 Server busy (transient error)

        // Set up the block owning volume to respond with errors
        vols[0].Volume.(*MockVolume).Bad = true
        vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
        response = IssueRequest(s.handler, &RequestTester{
                method:   "GET",
                uri:      signedLocator,
                apiToken: knownToken,
        })
        // A transient error from one volume while the other doesn't find the block
        // should make the service return a 503 so that clients can retry.
        ExpectStatusCode(c,
                "Volume backend busy",
                503, response)
}

// Test PutBlockHandler on the following situations:
//   - no server key
//   - with server key, authenticated request, unsigned locator
//   - with server key, unauthenticated request, unsigned locator
//
func (s *HandlerSuite) TestPutHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // --------------
        // No server key.

        s.cluster.Collections.BlobSigningKey = ""

        // Unauthenticated request, no server key
        // => OK (unsigned response)
        unsignedLocator := "/" + TestHash
        response := IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         unsignedLocator,
                        requestBody: TestBlock,
                })

        ExpectStatusCode(c,
                "Unauthenticated request, no server key", http.StatusOK, response)
        ExpectBody(c,
                "Unauthenticated request, no server key",
                TestHashPutResp, response)

        // ------------------
        // With a server key.

        s.cluster.Collections.BlobSigningKey = knownKey
        s.cluster.Collections.BlobSigningTTL.Set("5m")

        // When a permission key is available, the locator returned
        // from an authenticated PUT request will be signed.

        // Authenticated PUT, signed locator
        // => OK (signed response)
        response = IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         unsignedLocator,
                        requestBody: TestBlock,
                        apiToken:    knownToken,
                })

        ExpectStatusCode(c,
                "Authenticated PUT, signed locator, with server key",
                http.StatusOK, response)
        responseLocator := strings.TrimSpace(response.Body.String())
        if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
                c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
                        "response '%s' does not contain a valid signature",
                        responseLocator)
        }

        // Unauthenticated PUT, unsigned locator
        // => OK
        response = IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         unsignedLocator,
                        requestBody: TestBlock,
                })

        ExpectStatusCode(c,
                "Unauthenticated PUT, unsigned locator, with server key",
                http.StatusOK, response)
        ExpectBody(c,
                "Unauthenticated PUT, unsigned locator, with server key",
                TestHashPutResp, response)
}

func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
        s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        s.cluster.SystemRootToken = "fake-data-manager-token"
        IssueRequest(s.handler,
                &RequestTester{
                        method:      "PUT",
                        uri:         "/" + TestHash,
                        requestBody: TestBlock,
                })

        s.cluster.Collections.BlobTrash = true
        IssueRequest(s.handler,
                &RequestTester{
                        method:      "DELETE",
                        uri:         "/" + TestHash,
                        requestBody: TestBlock,
                        apiToken:    s.cluster.SystemRootToken,
                })
        type expect struct {
                volid     string
                method    string
                callcount int
        }
        for _, e := range []expect{
                {"zzzzz-nyw5e-000000000000000", "Get", 0},
                {"zzzzz-nyw5e-000000000000000", "Compare", 0},
                {"zzzzz-nyw5e-000000000000000", "Touch", 0},
                {"zzzzz-nyw5e-000000000000000", "Put", 0},
                {"zzzzz-nyw5e-000000000000000", "Delete", 0},
                {"zzzzz-nyw5e-111111111111111", "Get", 0},
                {"zzzzz-nyw5e-111111111111111", "Compare", 1},
                {"zzzzz-nyw5e-111111111111111", "Touch", 1},
                {"zzzzz-nyw5e-111111111111111", "Put", 1},
                {"zzzzz-nyw5e-111111111111111", "Delete", 1},
        } {
                if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
                        c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
                }
        }
}

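// expectCallCount asserts how many times a given method was invoked
// on a MockVolume mount. It is a convenience sketch along the lines
// of the loop above; the tests in this file inline the same logic
// rather than calling it.
func expectCallCount(c *check.C, h *handler, volid, method string, want int) {
        if calls := h.volmgr.mountMap[volid].Volume.(*MockVolume).CallCount(method); calls != want {
                c.Errorf("Got %d %s() on vol %s, expect %d", calls, method, volid, want)
        }
}
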
func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-111111111111111": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class1": true}},
                "zzzzz-nyw5e-222222222222222": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class2": true, "class3": true}},
        }

        for _, trial := range []struct {
                priority1 int // priority of class1, thus vol1
                priority2 int // priority of class2
                priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
                get1      int // expected number of "get" ops on vol1
                get2      int // expected number of "get" ops on vol2
        }{
                {100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
                {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
                {66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
                {66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
        } {
                c.Logf("%+v", trial)
                s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
                        "class1": {Priority: trial.priority1},
                        "class2": {Priority: trial.priority2},
                        "class3": {Priority: trial.priority3},
                }
                c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
                IssueRequest(s.handler,
                        &RequestTester{
                                method:         "PUT",
                                uri:            "/" + TestHash,
                                requestBody:    TestBlock,
                                storageClasses: "class1",
                        })
                IssueRequest(s.handler,
                        &RequestTester{
                                method: "GET",
                                uri:    "/" + TestHash,
                        })
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
        }
}

func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-111111111111111": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class1": true}},
                "zzzzz-nyw5e-121212121212121": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class1": true, "class2": true}},
                "zzzzz-nyw5e-222222222222222": {
                        Driver:         "mock",
                        Replication:    1,
                        StorageClasses: map[string]bool{"class2": true}},
        }

        for _, trial := range []struct {
                setCounter uint32 // value to stuff vm.counter, to control offset
                classes    string // desired classes
                put111     int    // expected number of "put" ops on 11111... after 2x put reqs
                put121     int    // expected number of "put" ops on 12121...
                put222     int    // expected number of "put" ops on 22222...
                cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
                cmp121     int    // expected number of "compare" ops on 12121...
                cmp222     int    // expected number of "compare" ops on 22222...
        }{
                {0, "class1",
                        1, 0, 0,
                        2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 111
400                 {0, "class2",
401                         0, 1, 0,
402                         0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
403                 {0, "class1,class2",
404                         1, 1, 0,
405                         2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
406                 {1, "class1,class2",
407                         0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
408                         2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
409                 {0, "class1,class2,class404",
410                         1, 1, 0,
411                         2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
412         } {
413                 c.Logf("%+v", trial)
414                 s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
415                         "class1": {},
416                         "class2": {},
417                         "class3": {},
418                 }
419                 c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
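                // Stuffing the volume manager's round-robin counter
                // makes the order in which volumes are offered
                // deterministic for this trial (see the setCounter
                // comments above).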
                atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
                for i := 0; i < 2; i++ {
                        IssueRequest(s.handler,
                                &RequestTester{
                                        method:         "PUT",
                                        uri:            "/" + TestHash,
                                        requestBody:    TestBlock,
                                        storageClasses: trial.classes,
                                })
                }
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
                c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
        }
}

// Test TOUCH requests.
func (s *HandlerSuite) TestTouchHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        vols := s.handler.volmgr.AllWritable()
        vols[0].Put(context.Background(), TestHash, TestBlock)
        vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
        afterPut := time.Now()
        t, err := vols[0].Mtime(TestHash)
        c.Assert(err, check.IsNil)
        c.Assert(t.Before(afterPut), check.Equals, true)

        ExpectStatusCode(c,
                "touch with no credentials",
                http.StatusUnauthorized,
                IssueRequest(s.handler, &RequestTester{
                        method: "TOUCH",
                        uri:    "/" + TestHash,
                }))

        ExpectStatusCode(c,
                "touch with non-root credentials",
                http.StatusUnauthorized,
                IssueRequest(s.handler, &RequestTester{
                        method:   "TOUCH",
                        uri:      "/" + TestHash,
                        apiToken: arvadostest.ActiveTokenV2,
                }))

        ExpectStatusCode(c,
                "touch non-existent block",
                http.StatusNotFound,
                IssueRequest(s.handler, &RequestTester{
                        method:   "TOUCH",
                        uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        apiToken: s.cluster.SystemRootToken,
                }))

        beforeTouch := time.Now()
        ExpectStatusCode(c,
                "touch block",
                http.StatusOK,
                IssueRequest(s.handler, &RequestTester{
                        method:   "TOUCH",
                        uri:      "/" + TestHash,
                        apiToken: s.cluster.SystemRootToken,
                }))
        t, err = vols[0].Mtime(TestHash)
        c.Assert(err, check.IsNil)
        c.Assert(t.After(beforeTouch), check.Equals, true)
}

// Test /index requests:
//   - unauthenticated /index request
//   - unauthenticated /index/prefix request
//   - authenticated   /index request        | non-superuser
//   - authenticated   /index/prefix request | non-superuser
//   - authenticated   /index request        | superuser
//   - authenticated   /index/prefix request | superuser
//
// The only /index requests that should succeed are those issued by the
// superuser. They should pass regardless of the value of BlobSigning.
//
func (s *HandlerSuite) TestIndexHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // Include multiple blocks on different volumes, and
        // some metadata files (which should be omitted from index listings)
        vols := s.handler.volmgr.AllWritable()
        vols[0].Put(context.Background(), TestHash, TestBlock)
        vols[1].Put(context.Background(), TestHash2, TestBlock2)
        vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
        vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))

        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        unauthenticatedReq := &RequestTester{
                method: "GET",
                uri:    "/index",
        }
        authenticatedReq := &RequestTester{
                method:   "GET",
                uri:      "/index",
                apiToken: knownToken,
        }
        superuserReq := &RequestTester{
                method:   "GET",
                uri:      "/index",
                apiToken: s.cluster.SystemRootToken,
        }
        unauthPrefixReq := &RequestTester{
                method: "GET",
                uri:    "/index/" + TestHash[0:3],
        }
        authPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/" + TestHash[0:3],
                apiToken: knownToken,
        }
        superuserPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/" + TestHash[0:3],
                apiToken: s.cluster.SystemRootToken,
        }
        superuserNoSuchPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/abcd",
                apiToken: s.cluster.SystemRootToken,
        }
        superuserInvalidPrefixReq := &RequestTester{
                method:   "GET",
                uri:      "/index/xyz",
                apiToken: s.cluster.SystemRootToken,
        }

        // -------------------------------------------------------------
        // Only the superuser should be allowed to issue /index requests.

        // ---------------------------
        // BlobSigning enabled
        // This setting should not affect whether these requests succeed.
        s.cluster.Collections.BlobSigning = true

        // unauthenticated /index request
        // => UnauthorizedError
        response := IssueRequest(s.handler, unauthenticatedReq)
        ExpectStatusCode(c,
                "permissions on, unauthenticated request",
                UnauthorizedError.HTTPCode,
                response)

        // unauthenticated /index/prefix request
        // => UnauthorizedError
        response = IssueRequest(s.handler, unauthPrefixReq)
        ExpectStatusCode(c,
                "permissions on, unauthenticated /index/prefix request",
                UnauthorizedError.HTTPCode,
                response)

        // authenticated /index request, non-superuser
        // => UnauthorizedError
        response = IssueRequest(s.handler, authenticatedReq)
        ExpectStatusCode(c,
                "permissions on, authenticated request, non-superuser",
                UnauthorizedError.HTTPCode,
                response)

        // authenticated /index/prefix request, non-superuser
        // => UnauthorizedError
        response = IssueRequest(s.handler, authPrefixReq)
        ExpectStatusCode(c,
                "permissions on, authenticated /index/prefix request, non-superuser",
                UnauthorizedError.HTTPCode,
                response)

        // superuser /index request
        // => OK
        response = IssueRequest(s.handler, superuserReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        // ----------------------------
        // BlobSigning disabled
        // A valid request should still pass.
        s.cluster.Collections.BlobSigning = false

        // superuser /index request
        // => OK
        response = IssueRequest(s.handler, superuserReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        expected := `^` + TestHash + `\+\d+ \d+\n` +
                TestHash2 + `\+\d+ \d+\n\n$`
        c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
                "permissions on, superuser request"))

        // superuser /index/prefix request
        // => OK
        response = IssueRequest(s.handler, superuserPrefixReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        expected = `^` + TestHash + `\+\d+ \d+\n\n$`
        c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
                "permissions on, superuser /index/prefix request"))

        // superuser /index/{no-such-prefix} request
        // => OK
        response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)

        if response.Body.String() != "\n" {
                c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
        }

        // superuser /index/{invalid-prefix} request
        // => StatusBadRequest
        response = IssueRequest(s.handler, superuserInvalidPrefixReq)
        ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusBadRequest,
                response)
}

// TestDeleteHandler
//
// Cases tested:
//
//   With no token and with a non-data-manager token:
//   * Delete existing block
//     (test for 403 Forbidden, confirm block not deleted)
//
//   With data manager token:
//
//   * Delete existing block
//     (test for 200 OK, response counts, confirm block deleted)
//
//   * Delete nonexistent block
//     (test for 404 Not Found)
//
//   TODO(twp):
//
//   * Delete block on read-only and read-write volume
//     (test for 200 OK, response with copies_deleted=1,
//     copies_failed=1, confirm block deleted only on r/w volume)
//
//   * Delete block on read-only volume only
//     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
//     confirm block not deleted)
//
func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        vols := s.handler.volmgr.AllWritable()
        vols[0].Put(context.Background(), TestHash, TestBlock)

        // Explicitly set the BlobSigningTTL to 0 for these
        // tests, to ensure the MockVolume deletes the blocks
        // even though they have just been created.
        s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)

        var userToken = "NOT DATA MANAGER TOKEN"
        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        s.cluster.Collections.BlobTrash = true

        unauthReq := &RequestTester{
                method: "DELETE",
                uri:    "/" + TestHash,
        }

        userReq := &RequestTester{
                method:   "DELETE",
                uri:      "/" + TestHash,
                apiToken: userToken,
        }

        superuserExistingBlockReq := &RequestTester{
                method:   "DELETE",
                uri:      "/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }

        superuserNonexistentBlockReq := &RequestTester{
                method:   "DELETE",
                uri:      "/" + TestHash2,
                apiToken: s.cluster.SystemRootToken,
        }

        // Unauthenticated request returns PermissionError.
        var response *httptest.ResponseRecorder
        response = IssueRequest(s.handler, unauthReq)
        ExpectStatusCode(c,
                "unauthenticated request",
                PermissionError.HTTPCode,
                response)

        // Authenticated non-admin request returns PermissionError.
        response = IssueRequest(s.handler, userReq)
        ExpectStatusCode(c,
                "authenticated non-admin request",
                PermissionError.HTTPCode,
                response)

        // Authenticated admin request for nonexistent block.
        type deletecounter struct {
                Deleted int `json:"copies_deleted"`
                Failed  int `json:"copies_failed"`
        }
        var responseDc, expectedDc deletecounter

        response = IssueRequest(s.handler, superuserNonexistentBlockReq)
        ExpectStatusCode(c,
                "data manager request, nonexistent block",
                http.StatusNotFound,
                response)

        // Authenticated admin request for existing block while BlobTrash is false.
        s.cluster.Collections.BlobTrash = false
        response = IssueRequest(s.handler, superuserExistingBlockReq)
        ExpectStatusCode(c,
                "authenticated request, existing block, method disabled",
                MethodDisabledError.HTTPCode,
                response)
        s.cluster.Collections.BlobTrash = true

        // Authenticated admin request for existing block.
        response = IssueRequest(s.handler, superuserExistingBlockReq)
        ExpectStatusCode(c,
                "data manager request, existing block",
                http.StatusOK,
                response)
        // Expect response {"copies_deleted":1,"copies_failed":0}
        expectedDc = deletecounter{1, 0}
        json.NewDecoder(response.Body).Decode(&responseDc)
        if responseDc != expectedDc {
                c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
                        expectedDc, responseDc)
        }
        // Confirm the block has been deleted
        buf := make([]byte, BlockSize)
        _, err := vols[0].Get(context.Background(), TestHash, buf)
        var blockDeleted = os.IsNotExist(err)
        if !blockDeleted {
                c.Error("superuserExistingBlockReq: block not deleted")
        }

        // A DELETE request on a block newer than BlobSigningTTL
        // should return success but leave the block on the volume.
        vols[0].Put(context.Background(), TestHash, TestBlock)
        s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)

        response = IssueRequest(s.handler, superuserExistingBlockReq)
        ExpectStatusCode(c,
                "data manager request, existing block",
                http.StatusOK,
                response)
        // Expect response {"copies_deleted":1,"copies_failed":0}
        expectedDc = deletecounter{1, 0}
        json.NewDecoder(response.Body).Decode(&responseDc)
        if responseDc != expectedDc {
                c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
                        expectedDc, responseDc)
        }
        // Confirm the block has NOT been deleted.
        _, err = vols[0].Get(context.Background(), TestHash, buf)
        if err != nil {
                c.Errorf("testing delete on new block: %s\n", err)
        }
}

// TestPullHandler
//
// Test handling of the PUT /pull statement.
//
// Cases tested: syntactically valid and invalid pull lists, from the
// data manager and from unprivileged users:
//
//   1. Valid pull list from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   2. Invalid pull request from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   3. Valid pull request from the data manager
//      (expected result: 200 OK with request body "Received 3 pull
814 //      requests"
//
//   4. Invalid pull request from the data manager
//      (expected result: 400 Bad Request)
//
// Test that in the end, the pull manager received a good pull list with
// the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// pull list simultaneously.  Make sure that none of them return 400
// Bad Request and that pullq.GetList() returns a valid list.
//
func (s *HandlerSuite) TestPullHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // Replace the router's pullq -- which the worker goroutines
        // started by setup() are now receiving from -- with a new
        // one, so we can see what the handler sends to it.
        pullq := NewWorkQueue()
        s.handler.Handler.(*router).pullq = pullq

        var userToken = "USER TOKEN"
        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        goodJSON := []byte(`[
                {
                        "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
                        "servers":[
                                "http://server1",
                                "http://server2"
                        ]
                },
                {
                        "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
                        "servers":[]
                },
                {
                        "locator":"cccccccccccccccccccccccccccccccc+12345",
                        "servers":["http://server1"]
                }
        ]`)

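        // Judging from goodJSON above, each pull list entry has the
        // wire shape (a sketch, not a complete schema):
        //
        //         {"locator": "<hash>+<size>", "servers": ["http://host", ...]}
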
        badJSON := []byte(`{ "key":"I'm a little teapot" }`)

        type pullTest struct {
                name         string
                req          RequestTester
                responseCode int
                responseBody string
        }
        var testcases = []pullTest{
                {
                        "Valid pull list from an ordinary user",
                        RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Invalid pull request from an ordinary user",
                        RequestTester{"/pull", userToken, "PUT", badJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Valid pull request from the data manager",
                        RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
                        http.StatusOK,
                        "Received 3 pull requests\n",
                },
                {
                        "Invalid pull request from the data manager",
                        RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
                        http.StatusBadRequest,
                        "",
                },
        }

        for _, tst := range testcases {
                response := IssueRequest(s.handler, &tst.req)
                ExpectStatusCode(c, tst.name, tst.responseCode, response)
                ExpectBody(c, tst.name, tst.responseBody, response)
        }

        // The Keep pull manager should have received one good list with 3
        // requests on it.
        for i := 0; i < 3; i++ {
                var item interface{}
                select {
                case item = <-pullq.NextItem:
                case <-time.After(time.Second):
                        c.Error("timed out")
                }
                if _, ok := item.(PullRequest); !ok {
                        c.Errorf("item %v could not be parsed as a PullRequest", item)
                }
        }

        expectChannelEmpty(c, pullq.NextItem)
}

// TestTrashHandler
//
// Cases tested: syntactically valid and invalid trash lists, from the
// data manager and from unprivileged users:
//
//   1. Valid trash list from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   2. Invalid trash list from an ordinary user
//      (expected result: 401 Unauthorized)
//
//   3. Valid trash list from the data manager
//      (expected result: 200 OK with request body "Received 3 trash
929 //      requests"
//
//   4. Invalid trash list from the data manager
//      (expected result: 400 Bad Request)
//
// Test that in the end, the trash collector received a good trash
// list with the expected number of requests.
//
// TODO(twp): test concurrency: launch 100 goroutines to update the
// trash list simultaneously.  Make sure that none of them return 400
// Bad Request and that replica.Dump() returns a valid list.
//
func (s *HandlerSuite) TestTrashHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        // Replace the router's trashq -- which the worker goroutines
        // started by setup() are now receiving from -- with a new
        // one, so we can see what the handler sends to it.
        trashq := NewWorkQueue()
        s.handler.Handler.(*router).trashq = trashq

        var userToken = "USER TOKEN"
        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        goodJSON := []byte(`[
                {
                        "locator":"block1",
                        "block_mtime":1409082153
                },
                {
                        "locator":"block2",
                        "block_mtime":1409082153
                },
                {
                        "locator":"block3",
                        "block_mtime":1409082153
                }
        ]`)

        badJSON := []byte(`I am not a valid JSON string`)

        type trashTest struct {
                name         string
                req          RequestTester
                responseCode int
                responseBody string
        }

        var testcases = []trashTest{
                {
                        "Valid trash list from an ordinary user",
                        RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Invalid trash list from an ordinary user",
                        RequestTester{"/trash", userToken, "PUT", badJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Valid trash list from the data manager",
                        RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
                        http.StatusOK,
                        "Received 3 trash requests\n",
                },
                {
                        "Invalid trash list from the data manager",
                        RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
                        http.StatusBadRequest,
                        "",
                },
        }

        for _, tst := range testcases {
                response := IssueRequest(s.handler, &tst.req)
                ExpectStatusCode(c, tst.name, tst.responseCode, response)
                ExpectBody(c, tst.name, tst.responseBody, response)
        }

        // The trash collector should have received one good list with 3
        // requests on it.
        for i := 0; i < 3; i++ {
                item := <-trashq.NextItem
                if _, ok := item.(TrashRequest); !ok {
                        c.Errorf("item %v could not be parsed as a TrashRequest", item)
                }
        }

        expectChannelEmpty(c, trashq.NextItem)
}

// ====================
// Helper functions
// ====================

// IssueRequest executes an HTTP request described by rt against the
// REST router, and returns the recorded HTTP response.
func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
        response := httptest.NewRecorder()
        body := bytes.NewReader(rt.requestBody)
        req, _ := http.NewRequest(rt.method, rt.uri, body)
        if rt.apiToken != "" {
                req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
        }
        if rt.storageClasses != "" {
                req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
        }
        handler.ServeHTTP(response, req)
        return response
}

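// IssueHealthCheckRequest is like IssueRequest, but sends the token
// in a "Bearer" Authorization header rather than the Keep-style
// "OAuth2" scheme; the health check test below uses it.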
func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
        response := httptest.NewRecorder()
        body := bytes.NewReader(rt.requestBody)
        req, _ := http.NewRequest(rt.method, rt.uri, body)
        if rt.apiToken != "" {
                req.Header.Set("Authorization", "Bearer "+rt.apiToken)
        }
        handler.ServeHTTP(response, req)
        return response
}

// ExpectStatusCode checks whether a response has the specified status code,
// and reports a test failure if not.
func ExpectStatusCode(
        c *check.C,
        testname string,
        expectedStatus int,
        response *httptest.ResponseRecorder) {
        c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
}

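// ExpectBody checks whether a response body matches expectedBody, and
// reports a test failure if not. An empty expectedBody matches any
// response.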
func ExpectBody(
        c *check.C,
        testname string,
        expectedBody string,
        response *httptest.ResponseRecorder) {
        if expectedBody != "" && response.Body.String() != expectedBody {
                c.Errorf("%s: expected response body '%s', got %+v",
                        testname, expectedBody, response)
        }
}

// See #7121
func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        defer func(orig *bufferPool) {
                bufs = orig
        }(bufs)
        bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)

        ok := make(chan struct{})
        go func() {
                for i := 0; i < 2; i++ {
                        response := IssueRequest(s.handler,
                                &RequestTester{
                                        method:      "PUT",
                                        uri:         "/" + TestHash,
                                        requestBody: TestBlock,
                                })
                        ExpectStatusCode(c,
                                "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
                }
                ok <- struct{}{}
        }()

        select {
        case <-ok:
        case <-time.After(time.Second):
                c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
        }
}

// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
// leak.
func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        ok := make(chan bool)
        go func() {
                for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
                        // Unauthenticated request, no server key
                        // => OK (unsigned response)
                        unsignedLocator := "/" + TestHash
                        response := IssueRequest(s.handler,
                                &RequestTester{
                                        method:      "PUT",
                                        uri:         unsignedLocator,
                                        requestBody: TestBlock,
                                })
                        ExpectStatusCode(c,
                                "TestPutHandlerBufferleak", http.StatusOK, response)
                        ExpectBody(c,
                                "TestPutHandlerBufferleak",
                                TestHashPutResp, response)
                }
                ok <- true
        }()
        select {
        case <-time.After(20 * time.Second):
                // If the buffer pool leaks, the test goroutine hangs.
                c.Fatal("test did not finish, assuming pool leaked")
        case <-ok:
        }
}

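// notifyingResponseRecorder wraps httptest.ResponseRecorder with a
// CloseNotify channel, so a test can simulate a client that has
// disconnected before the response is written.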
type notifyingResponseRecorder struct {
        *httptest.ResponseRecorder
        closer chan bool
}

func (r *notifyingResponseRecorder) CloseNotify() <-chan bool {
        return r.closer
}

func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
        s.cluster.Collections.BlobSigning = false
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        defer func(orig *bufferPool) {
                bufs = orig
        }(bufs)
        bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
        defer bufs.Put(bufs.Get(BlockSize))

        if err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock); err != nil {
                c.Error(err)
        }

        resp := &notifyingResponseRecorder{
                ResponseRecorder: httptest.NewRecorder(),
                closer:           make(chan bool, 1),
        }
        if _, ok := http.ResponseWriter(resp).(http.CloseNotifier); !ok {
                c.Fatal("notifyingResponseRecorder is broken")
        }
        // If anyone asks, the client has disconnected.
        resp.closer <- true

        ok := make(chan struct{})
        go func() {
                req, _ := http.NewRequest("GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
                s.handler.ServeHTTP(resp, req)
                ok <- struct{}{}
        }()

        select {
        case <-time.After(20 * time.Second):
                c.Fatal("request took >20s, close notifier must be broken")
        case <-ok:
        }

        ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp.ResponseRecorder)
        for i, v := range s.handler.volmgr.AllWritable() {
                if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
                        c.Errorf("volume %d got %d calls, expected 0", i, calls)
                }
        }
}

// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
// leak.
func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        vols := s.handler.volmgr.AllWritable()
        if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
                c.Error(err)
        }

        ok := make(chan bool)
        go func() {
                for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
                        // Unauthenticated request, unsigned locator
                        // => OK
                        unsignedLocator := "/" + TestHash
                        response := IssueRequest(s.handler,
                                &RequestTester{
                                        method: "GET",
                                        uri:    unsignedLocator,
                                })
                        ExpectStatusCode(c,
                                "Unauthenticated request, unsigned locator", http.StatusOK, response)
                        ExpectBody(c,
                                "Unauthenticated request, unsigned locator",
                                string(TestBlock),
                                response)
                }
                ok <- true
        }()
        select {
        case <-time.After(20 * time.Second):
                // If the buffer pool leaks, the test goroutine hangs.
                c.Fatal("test did not finish, assuming pool leaked")
        case <-ok:
        }
}

func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
        s.cluster.Volumes = map[string]arvados.Volume{
                "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
                "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
                "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
        }
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        rt := RequestTester{
                method:      "PUT",
                uri:         "/" + TestHash,
                requestBody: TestBlock,
        }

        for _, trial := range []struct {
                ask    string
                expect string
        }{
                {"", ""},
                {"default", "default=1"},
                {" , default , default , ", "default=1"},
                {"special", "extra=1, special=1"},
                {"special, readonly", "extra=1, special=1"},
                {"special, nonexistent", "extra=1, special=1"},
                {"extra, special", "extra=1, special=1"},
                {"default, special", "default=1, extra=1, special=1"},
        } {
                c.Logf("success case %#v", trial)
                rt.storageClasses = trial.ask
                resp := IssueRequest(s.handler, &rt)
                if trial.expect == "" {
                        // any non-empty value is correct
                        c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
                } else {
                        c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
                }
        }

        for _, trial := range []struct {
                ask string
        }{
                {"doesnotexist"},
                {"doesnotexist, readonly"},
                {"readonly"},
        } {
                c.Logf("failure case %#v", trial)
                rt.storageClasses = trial.ask
                resp := IssueRequest(s.handler, &rt)
                c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
        }
}

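// sortCommaSeparated sorts the fields of a ", "-separated list, so
// header values can be compared without depending on element order.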
func sortCommaSeparated(s string) string {
        slice := strings.Split(s, ", ")
        sort.Strings(slice)
        return strings.Join(slice, ", ")
}

func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        resp := IssueRequest(s.handler, &RequestTester{
                method:      "PUT",
                uri:         "/" + TestHash,
                requestBody: TestBlock,
        })
        c.Logf("%#v", resp)
        c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
        c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
}

func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // Set up Keep volumes
        vols := s.handler.volmgr.AllWritable()
        vols[0].Put(context.Background(), TestHash, TestBlock)

        s.cluster.SystemRootToken = "DATA MANAGER TOKEN"

        // unauthenticatedReq => UnauthorizedError
        unauthenticatedReq := &RequestTester{
                method: "PUT",
                uri:    "/untrash/" + TestHash,
        }
        response := IssueRequest(s.handler, unauthenticatedReq)
        ExpectStatusCode(c,
                "Unauthenticated request",
                UnauthorizedError.HTTPCode,
                response)

        // notDataManagerReq => UnauthorizedError
        notDataManagerReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/" + TestHash,
                apiToken: knownToken,
        }

        response = IssueRequest(s.handler, notDataManagerReq)
        ExpectStatusCode(c,
                "Non-datamanager token",
                UnauthorizedError.HTTPCode,
                response)

        // datamanagerWithBadHashReq => StatusBadRequest
        datamanagerWithBadHashReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/thisisnotalocator",
                apiToken: s.cluster.SystemRootToken,
        }
        response = IssueRequest(s.handler, datamanagerWithBadHashReq)
        ExpectStatusCode(c,
                "Bad locator in untrash request",
                http.StatusBadRequest,
                response)

        // datamanagerWrongMethodReq => StatusMethodNotAllowed
        datamanagerWrongMethodReq := &RequestTester{
                method:   "GET",
                uri:      "/untrash/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }
        response = IssueRequest(s.handler, datamanagerWrongMethodReq)
        ExpectStatusCode(c,
                "Only PUT method is supported for untrash",
                http.StatusMethodNotAllowed,
                response)

        // datamanagerReq => StatusOK
        datamanagerReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }
        response = IssueRequest(s.handler, datamanagerReq)
        ExpectStatusCode(c,
                "",
                http.StatusOK,
                response)
        c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
}

func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
        // Change all volumes to read-only
        for uuid, v := range s.cluster.Volumes {
                v.ReadOnly = true
                s.cluster.Volumes[uuid] = v
        }
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)

        // datamanagerReq => StatusNotFound (no writable volumes)
        datamanagerReq := &RequestTester{
                method:   "PUT",
                uri:      "/untrash/" + TestHash,
                apiToken: s.cluster.SystemRootToken,
        }
        response := IssueRequest(s.handler, datamanagerReq)
        ExpectStatusCode(c,
                "No writable volumes",
                http.StatusNotFound,
                response)
}

func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
        s.cluster.ManagementToken = arvadostest.ManagementToken
        c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
        pingReq := &RequestTester{
                method:   "GET",
                uri:      "/_health/ping",
                apiToken: arvadostest.ManagementToken,
        }
        response := IssueHealthCheckRequest(s.handler, pingReq)
        ExpectStatusCode(c,
                "",
                http.StatusOK,
                response)
        want := `{"health":"OK"}`
        if !strings.Contains(response.Body.String(), want) {
                c.Errorf("expected response to include %s: got %s", want, response.Body.String())
        }
}