diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
index 8be471025db5119fb25510d2305fdaaa5cbb7257..d545bde0ab9193151b655328d3323f01932a2b19 100644
--- a/services/keepstore/handler_test.go
+++ b/services/keepstore/handler_test.go
@@ -1,3 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
 // Tests for Keep HTTP handlers:
 //
 //     GetBlockHandler
 // The HTTP handlers are responsible for enforcing permission policy,
 // so these tests must exercise all possible permission permutations.
 
-package main
+package keepstore
 
 import (
        "bytes"
+       "context"
        "encoding/json"
        "fmt"
        "net/http"
        "net/http/httptest"
        "os"
-       "regexp"
+       "sort"
        "strings"
-       "testing"
+       "sync/atomic"
        "time"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/prometheus/client_golang/prometheus"
+       check "gopkg.in/check.v1"
 )
 
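+// testServiceURL is the address this keepstore instance advertises.
+// Nothing needs to be listening there: these tests invoke the
+// handler's ServeHTTP directly.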
+var testServiceURL = func() arvados.URL {
+       return arvados.URL{Host: "localhost:12345", Scheme: "http"}
+}()
+
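+// testCluster returns a minimal single-cluster configuration, with
+// test tokens filled in and blob signing disabled.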
+func testCluster(t TB) *arvados.Cluster {
+       cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
+       if err != nil {
+               t.Fatal(err)
+       }
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               t.Fatal(err)
+       }
+       cluster.SystemRootToken = arvadostest.SystemRootToken
+       cluster.ManagementToken = arvadostest.ManagementToken
+       cluster.Collections.BlobSigning = false
+       return cluster
+}
+
+var _ = check.Suite(&HandlerSuite{})
+
+type HandlerSuite struct {
+       cluster *arvados.Cluster
+       handler *handler
+}
+
+func (s *HandlerSuite) SetUpTest(c *check.C) {
+       s.cluster = testCluster(c)
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
+       }
+       s.handler = &handler{}
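+       // Note: tests are expected to mutate s.cluster as needed and
+       // then call s.handler.setup themselves; setup is deliberately
+       // not called here.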
+}
+
 // A RequestTester represents the parameters for an HTTP request to
 // be issued on behalf of a unit test.
 type RequestTester struct {
-       uri          string
-       api_token    string
-       method       string
-       request_body []byte
+       uri            string
+       apiToken       string
+       method         string
+       requestBody    []byte
+       storageClasses string
 }
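+
+// For example (a sketch; IssueRequest is defined near the end of this
+// file):
+//
+//	response := IssueRequest(s.handler, &RequestTester{
+//		method:      "PUT",
+//		uri:         "/" + TestHash,
+//		requestBody: TestBlock,
+//		apiToken:    knownToken,
+//	})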
 
 // Test GetBlockHandler on the following situations:
@@ -37,108 +85,121 @@ type RequestTester struct {
 //   - permissions on, authenticated request, unsigned locator
 //   - permissions on, unauthenticated request, signed locator
 //   - permissions on, authenticated request, expired locator
+//   - permissions on, authenticated request, signed locator, transient error from backend
 //
-func TestGetHandler(t *testing.T) {
-       defer teardown()
+func (s *HandlerSuite) TestGetHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
-       // Prepare two test Keep volumes. Our block is stored on the second volume.
-       KeepVM = MakeTestVolumeManager(2)
-       defer KeepVM.Close()
-
-       vols := KeepVM.AllWritable()
-       if err := vols[0].Put(TEST_HASH, TEST_BLOCK); err != nil {
-               t.Error(err)
-       }
+       vols := s.handler.volmgr.AllWritable()
+       err := vols[0].Put(context.Background(), TestHash, TestBlock)
+       c.Check(err, check.IsNil)
 
        // Create locators for testing.
        // Turn on permission settings so we can generate signed locators.
-       enforce_permissions = true
-       PermissionSecret = []byte(known_key)
-       blob_signature_ttl = 300 * time.Second
+       s.cluster.Collections.BlobSigning = true
+       s.cluster.Collections.BlobSigningKey = knownKey
+       s.cluster.Collections.BlobSigningTTL.Set("5m")
 
        var (
-               unsigned_locator  = "/" + TEST_HASH
-               valid_timestamp   = time.Now().Add(blob_signature_ttl)
-               expired_timestamp = time.Now().Add(-time.Hour)
-               signed_locator    = "/" + SignLocator(TEST_HASH, known_token, valid_timestamp)
-               expired_locator   = "/" + SignLocator(TEST_HASH, known_token, expired_timestamp)
+               unsignedLocator  = "/" + TestHash
+               validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
+               expiredTimestamp = time.Now().Add(-time.Hour)
+               signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
+               expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
        )
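+       // (A sketch of the signed locator format: SignLocator appends a
+       // permission hint, giving <hash>+A<signature>@<expiry-hex>; see
+       // the SignLocator implementation for authoritative details.)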
 
        // -----------------
        // Test unauthenticated request with permissions off.
-       enforce_permissions = false
+       s.cluster.Collections.BlobSigning = false
 
        // Unauthenticated request, unsigned locator
        // => OK
-       response := IssueRequest(
+       response := IssueRequest(s.handler,
                &RequestTester{
                        method: "GET",
-                       uri:    unsigned_locator,
+                       uri:    unsignedLocator,
                })
-       ExpectStatusCode(t,
+       ExpectStatusCode(c,
                "Unauthenticated request, unsigned locator", http.StatusOK, response)
-       ExpectBody(t,
+       ExpectBody(c,
                "Unauthenticated request, unsigned locator",
-               string(TEST_BLOCK),
+               string(TestBlock),
                response)
 
-       received_cl := response.Header().Get("Content-Length")
-       expected_cl := fmt.Sprintf("%d", len(TEST_BLOCK))
-       if received_cl != expected_cl {
-               t.Errorf("expected Content-Length %s, got %s", expected_cl, received_cl)
+       receivedLen := response.Header().Get("Content-Length")
+       expectedLen := fmt.Sprintf("%d", len(TestBlock))
+       if receivedLen != expectedLen {
+               c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
        }
 
        // ----------------
        // Permissions: on.
-       enforce_permissions = true
+       s.cluster.Collections.BlobSigning = true
 
        // Authenticated request, signed locator
        // => OK
-       response = IssueRequest(&RequestTester{
-               method:    "GET",
-               uri:       signed_locator,
-               api_token: known_token,
+       response = IssueRequest(s.handler, &RequestTester{
+               method:   "GET",
+               uri:      signedLocator,
+               apiToken: knownToken,
        })
-       ExpectStatusCode(t,
+       ExpectStatusCode(c,
                "Authenticated request, signed locator", http.StatusOK, response)
-       ExpectBody(t,
-               "Authenticated request, signed locator", string(TEST_BLOCK), response)
+       ExpectBody(c,
+               "Authenticated request, signed locator", string(TestBlock), response)
 
-       received_cl = response.Header().Get("Content-Length")
-       expected_cl = fmt.Sprintf("%d", len(TEST_BLOCK))
-       if received_cl != expected_cl {
-               t.Errorf("expected Content-Length %s, got %s", expected_cl, received_cl)
+       receivedLen = response.Header().Get("Content-Length")
+       expectedLen = fmt.Sprintf("%d", len(TestBlock))
+       if receivedLen != expectedLen {
+               c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
        }
 
        // Authenticated request, unsigned locator
        // => PermissionError
-       response = IssueRequest(&RequestTester{
-               method:    "GET",
-               uri:       unsigned_locator,
-               api_token: known_token,
+       response = IssueRequest(s.handler, &RequestTester{
+               method:   "GET",
+               uri:      unsignedLocator,
+               apiToken: knownToken,
        })
-       ExpectStatusCode(t, "unsigned locator", PermissionError.HTTPCode, response)
+       ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
 
        // Unauthenticated request, signed locator
        // => PermissionError
-       response = IssueRequest(&RequestTester{
+       response = IssueRequest(s.handler, &RequestTester{
                method: "GET",
-               uri:    signed_locator,
+               uri:    signedLocator,
        })
-       ExpectStatusCode(t,
+       ExpectStatusCode(c,
                "Unauthenticated request, signed locator",
                PermissionError.HTTPCode, response)
 
        // Authenticated request, expired locator
        // => ExpiredError
-       response = IssueRequest(&RequestTester{
-               method:    "GET",
-               uri:       expired_locator,
-               api_token: known_token,
+       response = IssueRequest(s.handler, &RequestTester{
+               method:   "GET",
+               uri:      expiredLocator,
+               apiToken: knownToken,
        })
-       ExpectStatusCode(t,
+       ExpectStatusCode(c,
                "Authenticated request, expired locator",
                ExpiredError.HTTPCode, response)
+
+       // Authenticated request, signed locator
+       // => 503 Server busy (transient error)
+
+       // Set up the block-owning volume to respond with errors.
+       vols[0].Volume.(*MockVolume).Bad = true
+       vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
+       response = IssueRequest(s.handler, &RequestTester{
+               method:   "GET",
+               uri:      signedLocator,
+               apiToken: knownToken,
+       })
+       // A transient error from one volume while the other doesn't find the block
+       // should make the service return a 503 so that clients can retry.
+       ExpectStatusCode(c,
+               "Volume backend busy",
+               503, response)
 }
 
 // Test PutBlockHandler on the following situations:
@@ -146,119 +207,306 @@ func TestGetHandler(t *testing.T) {
 //   - with server key, authenticated request, unsigned locator
 //   - with server key, unauthenticated request, unsigned locator
 //
-func TestPutHandler(t *testing.T) {
-       defer teardown()
-
-       // Prepare two test Keep volumes.
-       KeepVM = MakeTestVolumeManager(2)
-       defer KeepVM.Close()
+func (s *HandlerSuite) TestPutHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
        // --------------
        // No server key.
 
+       s.cluster.Collections.BlobSigningKey = ""
+
        // Unauthenticated request, no server key
        // => OK (unsigned response)
-       unsigned_locator := "/" + TEST_HASH
-       response := IssueRequest(
+       unsignedLocator := "/" + TestHash
+       response := IssueRequest(s.handler,
                &RequestTester{
-                       method:       "PUT",
-                       uri:          unsigned_locator,
-                       request_body: TEST_BLOCK,
+                       method:      "PUT",
+                       uri:         unsignedLocator,
+                       requestBody: TestBlock,
                })
 
-       ExpectStatusCode(t,
+       ExpectStatusCode(c,
                "Unauthenticated request, no server key", http.StatusOK, response)
-       ExpectBody(t,
+       ExpectBody(c,
                "Unauthenticated request, no server key",
-               TEST_HASH_PUT_RESPONSE, response)
+               TestHashPutResp, response)
 
        // ------------------
        // With a server key.
 
-       PermissionSecret = []byte(known_key)
-       blob_signature_ttl = 300 * time.Second
+       s.cluster.Collections.BlobSigningKey = knownKey
+       s.cluster.Collections.BlobSigningTTL.Set("5m")
 
        // When a permission key is available, the locator returned
        // from an authenticated PUT request will be signed.
 
        // Authenticated PUT, signed locator
        // => OK (signed response)
-       response = IssueRequest(
+       response = IssueRequest(s.handler,
                &RequestTester{
-                       method:       "PUT",
-                       uri:          unsigned_locator,
-                       request_body: TEST_BLOCK,
-                       api_token:    known_token,
+                       method:      "PUT",
+                       uri:         unsignedLocator,
+                       requestBody: TestBlock,
+                       apiToken:    knownToken,
                })
 
-       ExpectStatusCode(t,
+       ExpectStatusCode(c,
                "Authenticated PUT, signed locator, with server key",
                http.StatusOK, response)
-       response_locator := strings.TrimSpace(response.Body.String())
-       if VerifySignature(response_locator, known_token) != nil {
-               t.Errorf("Authenticated PUT, signed locator, with server key:\n"+
+       responseLocator := strings.TrimSpace(response.Body.String())
+       if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
+               c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
                        "response '%s' does not contain a valid signature",
-                       response_locator)
+                       responseLocator)
        }
 
        // Unauthenticated PUT, unsigned locator
        // => OK
-       response = IssueRequest(
+       response = IssueRequest(s.handler,
                &RequestTester{
-                       method:       "PUT",
-                       uri:          unsigned_locator,
-                       request_body: TEST_BLOCK,
+                       method:      "PUT",
+                       uri:         unsignedLocator,
+                       requestBody: TestBlock,
                })
 
-       ExpectStatusCode(t,
+       ExpectStatusCode(c,
                "Unauthenticated PUT, unsigned locator, with server key",
                http.StatusOK, response)
-       ExpectBody(t,
+       ExpectBody(c,
                "Unauthenticated PUT, unsigned locator, with server key",
-               TEST_HASH_PUT_RESPONSE, response)
+               TestHashPutResp, response)
 }
 
-func TestPutAndDeleteSkipReadonlyVolumes(t *testing.T) {
-       defer teardown()
-       data_manager_token = "fake-data-manager-token"
-       vols := []*MockVolume{CreateMockVolume(), CreateMockVolume()}
-       vols[0].Readonly = true
-       KeepVM = MakeRRVolumeManager([]Volume{vols[0], vols[1]})
-       defer KeepVM.Close()
-       IssueRequest(
+func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
+       s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+
+       s.cluster.SystemRootToken = "fake-data-manager-token"
+       IssueRequest(s.handler,
                &RequestTester{
-                       method:       "PUT",
-                       uri:          "/" + TEST_HASH,
-                       request_body: TEST_BLOCK,
+                       method:      "PUT",
+                       uri:         "/" + TestHash,
+                       requestBody: TestBlock,
                })
-       never_delete = false
-       IssueRequest(
+
+       s.cluster.Collections.BlobTrash = true
+       IssueRequest(s.handler,
                &RequestTester{
-                       method:       "DELETE",
-                       uri:          "/" + TEST_HASH,
-                       request_body: TEST_BLOCK,
-                       api_token:    data_manager_token,
+                       method:      "DELETE",
+                       uri:         "/" + TestHash,
+                       requestBody: TestBlock,
+                       apiToken:    s.cluster.SystemRootToken,
                })
        type expect struct {
-               volnum    int
+               volid     string
                method    string
                callcount int
        }
        for _, e := range []expect{
-               {0, "Get", 0},
-               {0, "Touch", 0},
-               {0, "Put", 0},
-               {0, "Delete", 0},
-               {1, "Get", 1},
-               {1, "Put", 1},
-               {1, "Delete", 1},
+               {"zzzzz-nyw5e-000000000000000", "Get", 0},
+               {"zzzzz-nyw5e-000000000000000", "Compare", 0},
+               {"zzzzz-nyw5e-000000000000000", "Touch", 0},
+               {"zzzzz-nyw5e-000000000000000", "Put", 0},
+               {"zzzzz-nyw5e-000000000000000", "Delete", 0},
+               {"zzzzz-nyw5e-111111111111111", "Get", 0},
+               {"zzzzz-nyw5e-111111111111111", "Compare", 1},
+               {"zzzzz-nyw5e-111111111111111", "Touch", 1},
+               {"zzzzz-nyw5e-111111111111111", "Put", 1},
+               {"zzzzz-nyw5e-111111111111111", "Delete", 1},
+       } {
+               if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
+                       c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
+               }
+       }
+}
+
+func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-111111111111111": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class1": true}},
+               "zzzzz-nyw5e-222222222222222": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class2": true, "class3": true}},
+       }
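+
+       // Reads should try mounts in decreasing storage-class priority
+       // order: a volume's priority is the max over its classes, and
+       // ties are broken by volume UUID, so vol1 sorts first.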
+
+       for _, trial := range []struct {
+               priority1 int // priority of class1, thus vol1
+               priority2 int // priority of class2
+               priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
+               get1      int // expected number of "get" ops on vol1
+               get2      int // expected number of "get" ops on vol2
+       }{
+               {100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
+               {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
+               {66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
+               {66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
        } {
-               if calls := vols[e.volnum].CallCount(e.method); calls != e.callcount {
-                       t.Errorf("Got %d %s() on vol %d, expect %d", calls, e.method, e.volnum, e.callcount)
+               c.Logf("%+v", trial)
+               s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+                       "class1": {Priority: trial.priority1},
+                       "class2": {Priority: trial.priority2},
+                       "class3": {Priority: trial.priority3},
                }
+               c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+               IssueRequest(s.handler,
+                       &RequestTester{
+                               method:         "PUT",
+                               uri:            "/" + TestHash,
+                               requestBody:    TestBlock,
+                               storageClasses: "class1",
+                       })
+               IssueRequest(s.handler,
+                       &RequestTester{
+                               method: "GET",
+                               uri:    "/" + TestHash,
+                       })
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
        }
 }
 
+func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-111111111111111": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       ReadOnly:       true,
+                       StorageClasses: map[string]bool{"class1": true}},
+       }
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+       resp := IssueRequest(s.handler,
+               &RequestTester{
+                       method:         "PUT",
+                       uri:            "/" + TestHash,
+                       requestBody:    TestBlock,
+                       storageClasses: "class1",
+               })
+       c.Check(resp.Code, check.Equals, FullError.HTTPCode)
+       c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
+}
+
+func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-111111111111111": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class1": true}},
+               "zzzzz-nyw5e-121212121212121": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class1": true, "class2": true}},
+               "zzzzz-nyw5e-222222222222222": {
+                       Driver:         "mock",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class2": true}},
+       }
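+
+       // The volume manager offers writable mounts round-robin starting
+       // at vm.counter, so pinning the counter (setCounter below) makes
+       // the order of Put/Compare attempts deterministic.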
+
+       for _, trial := range []struct {
+               setCounter uint32 // value to store in vm.counter, to control the round-robin offset
+               classes    string // desired classes
+               put111     int    // expected number of "put" ops on 11111... after 2x put reqs
+               put121     int    // expected number of "put" ops on 12121...
+               put222     int    // expected number of "put" ops on 22222...
+               cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
+               cmp121     int    // expected number of "compare" ops on 12121...
+               cmp222     int    // expected number of "compare" ops on 22222...
+       }{
+               {0, "class1",
+                       1, 0, 0,
+                       2, 1, 0}, // first put compares on all vols with class1; second put succeeds after checking 111
+               {0, "class2",
+                       0, 1, 0,
+                       0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
+               {0, "class1,class2",
+                       1, 1, 0,
+                       2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+               {1, "class1,class2",
+                       0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
+                       2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
+               {0, "class1,class2,class404",
+                       1, 1, 0,
+                       2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
+       } {
+               c.Logf("%+v", trial)
+               s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+                       "class1": {},
+                       "class2": {},
+                       "class3": {},
+               }
+               c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+               atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
+               for i := 0; i < 2; i++ {
+                       IssueRequest(s.handler,
+                               &RequestTester{
+                                       method:         "PUT",
+                                       uri:            "/" + TestHash,
+                                       requestBody:    TestBlock,
+                                       storageClasses: trial.classes,
+                               })
+               }
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
+               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
+       }
+}
+
+// Test TOUCH requests.
+func (s *HandlerSuite) TestTouchHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+       vols := s.handler.volmgr.AllWritable()
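+       // Store a block and backdate its mtime, so we can tell below
+       // whether TOUCH refreshes it.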
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
+       afterPut := time.Now()
+       t, err := vols[0].Mtime(TestHash)
+       c.Assert(err, check.IsNil)
+       c.Assert(t.Before(afterPut), check.Equals, true)
+
+       ExpectStatusCode(c,
+               "touch with no credentials",
+               http.StatusUnauthorized,
+               IssueRequest(s.handler, &RequestTester{
+                       method: "TOUCH",
+                       uri:    "/" + TestHash,
+               }))
+
+       ExpectStatusCode(c,
+               "touch with non-root credentials",
+               http.StatusUnauthorized,
+               IssueRequest(s.handler, &RequestTester{
+                       method:   "TOUCH",
+                       uri:      "/" + TestHash,
+                       apiToken: arvadostest.ActiveTokenV2,
+               }))
+
+       ExpectStatusCode(c,
+               "touch non-existent block",
+               http.StatusNotFound,
+               IssueRequest(s.handler, &RequestTester{
+                       method:   "TOUCH",
+                       uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+                       apiToken: s.cluster.SystemRootToken,
+               }))
+
+       beforeTouch := time.Now()
+       ExpectStatusCode(c,
+               "touch block",
+               http.StatusOK,
+               IssueRequest(s.handler, &RequestTester{
+                       method:   "TOUCH",
+                       uri:      "/" + TestHash,
+                       apiToken: s.cluster.SystemRootToken,
+               }))
+       t, err = vols[0].Mtime(TestHash)
+       c.Assert(err, check.IsNil)
+       c.Assert(t.After(beforeTouch), check.Equals, true)
+}
+
 // Test /index requests:
 //   - unauthenticated /index request
 //   - unauthenticated /index/prefix request
@@ -268,139 +516,157 @@ func TestPutAndDeleteSkipReadonlyVolumes(t *testing.T) {
 //   - authenticated   /index/prefix request | superuser
 //
 // The only /index requests that should succeed are those issued by the
-// superuser. They should pass regardless of the value of enforce_permissions.
+// superuser. They should pass regardless of the value of BlobSigning.
 //
-func TestIndexHandler(t *testing.T) {
-       defer teardown()
+func (s *HandlerSuite) TestIndexHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
-       // Set up Keep volumes and populate them.
        // Include multiple blocks on different volumes, and
        // some metadata files (which should be omitted from index listings)
-       KeepVM = MakeTestVolumeManager(2)
-       defer KeepVM.Close()
+       vols := s.handler.volmgr.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       vols[1].Put(context.Background(), TestHash2, TestBlock2)
+       vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
+       vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
 
-       vols := KeepVM.AllWritable()
-       vols[0].Put(TEST_HASH, TEST_BLOCK)
-       vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
-       vols[0].Put(TEST_HASH+".meta", []byte("metadata"))
-       vols[1].Put(TEST_HASH_2+".meta", []byte("metadata"))
+       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
 
-       data_manager_token = "DATA MANAGER TOKEN"
-
-       unauthenticated_req := &RequestTester{
+       unauthenticatedReq := &RequestTester{
                method: "GET",
                uri:    "/index",
        }
-       authenticated_req := &RequestTester{
-               method:    "GET",
-               uri:       "/index",
-               api_token: known_token,
+       authenticatedReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index",
+               apiToken: knownToken,
        }
-       superuser_req := &RequestTester{
-               method:    "GET",
-               uri:       "/index",
-               api_token: data_manager_token,
+       superuserReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index",
+               apiToken: s.cluster.SystemRootToken,
        }
-       unauth_prefix_req := &RequestTester{
+       unauthPrefixReq := &RequestTester{
                method: "GET",
-               uri:    "/index/" + TEST_HASH[0:3],
+               uri:    "/index/" + TestHash[0:3],
+       }
+       authPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/" + TestHash[0:3],
+               apiToken: knownToken,
+       }
+       superuserPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/" + TestHash[0:3],
+               apiToken: s.cluster.SystemRootToken,
        }
-       auth_prefix_req := &RequestTester{
-               method:    "GET",
-               uri:       "/index/" + TEST_HASH[0:3],
-               api_token: known_token,
+       superuserNoSuchPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/abcd",
+               apiToken: s.cluster.SystemRootToken,
        }
-       superuser_prefix_req := &RequestTester{
-               method:    "GET",
-               uri:       "/index/" + TEST_HASH[0:3],
-               api_token: data_manager_token,
+       superuserInvalidPrefixReq := &RequestTester{
+               method:   "GET",
+               uri:      "/index/xyz",
+               apiToken: s.cluster.SystemRootToken,
        }
 
        // -------------------------------------------------------------
        // Only the superuser should be allowed to issue /index requests.
 
        // ---------------------------
-       // enforce_permissions enabled
+       // BlobSigning enabled
        // This setting should not affect the outcome of these tests.
-       enforce_permissions = true
+       s.cluster.Collections.BlobSigning = true
 
        // unauthenticated /index request
        // => UnauthorizedError
-       response := IssueRequest(unauthenticated_req)
-       ExpectStatusCode(t,
-               "enforce_permissions on, unauthenticated request",
+       response := IssueRequest(s.handler, unauthenticatedReq)
+       ExpectStatusCode(c,
+               "permissions on, unauthenticated request",
                UnauthorizedError.HTTPCode,
                response)
 
        // unauthenticated /index/prefix request
        // => UnauthorizedError
-       response = IssueRequest(unauth_prefix_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, unauthPrefixReq)
+       ExpectStatusCode(c,
                "permissions on, unauthenticated /index/prefix request",
                UnauthorizedError.HTTPCode,
                response)
 
        // authenticated /index request, non-superuser
        // => UnauthorizedError
-       response = IssueRequest(authenticated_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, authenticatedReq)
+       ExpectStatusCode(c,
                "permissions on, authenticated request, non-superuser",
                UnauthorizedError.HTTPCode,
                response)
 
        // authenticated /index/prefix request, non-superuser
        // => UnauthorizedError
-       response = IssueRequest(auth_prefix_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, authPrefixReq)
+       ExpectStatusCode(c,
                "permissions on, authenticated /index/prefix request, non-superuser",
                UnauthorizedError.HTTPCode,
                response)
 
        // superuser /index request
        // => OK
-       response = IssueRequest(superuser_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, superuserReq)
+       ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)
 
        // ----------------------------
-       // enforce_permissions disabled
+       // BlobSigning disabled
        // Valid Request should still pass.
-       enforce_permissions = false
+       s.cluster.Collections.BlobSigning = false
 
        // superuser /index request
        // => OK
-       response = IssueRequest(superuser_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, superuserReq)
+       ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)
 
-       expected := `^` + TEST_HASH + `\+\d+ \d+\n` +
-               TEST_HASH_2 + `\+\d+ \d+\n\n$`
-       match, _ := regexp.MatchString(expected, response.Body.String())
-       if !match {
-               t.Errorf(
-                       "permissions on, superuser request: expected %s, got:\n%s",
-                       expected, response.Body.String())
-       }
+       expected := `^` + TestHash + `\+\d+ \d+\n` +
+               TestHash2 + `\+\d+ \d+\n\n$`
+       c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
+               "permissions on, superuser request"))
 
        // superuser /index/prefix request
        // => OK
-       response = IssueRequest(superuser_prefix_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, superuserPrefixReq)
+       ExpectStatusCode(c,
+               "permissions on, superuser request",
+               http.StatusOK,
+               response)
+
+       expected = `^` + TestHash + `\+\d+ \d+\n\n$`
+       c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
+               "permissions on, superuser /index/prefix request"))
+
+       // superuser /index/{no-such-prefix} request
+       // => OK
+       response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
+       ExpectStatusCode(c,
                "permissions on, superuser request",
                http.StatusOK,
                response)
 
-       expected = `^` + TEST_HASH + `\+\d+ \d+\n\n$`
-       match, _ = regexp.MatchString(expected, response.Body.String())
-       if !match {
-               t.Errorf(
-                       "permissions on, superuser /index/prefix request: expected %s, got:\n%s",
-                       expected, response.Body.String())
+       if "\n" != response.Body.String() {
+               c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
        }
+
+       // superuser /index/{invalid-prefix} request
+       // => StatusBadRequest
+       response = IssueRequest(s.handler, superuserInvalidPrefixReq)
+       ExpectStatusCode(c,
+               "permissions on, superuser request",
+               http.StatusBadRequest,
+               response)
 }
 
 // TestDeleteHandler
@@ -429,62 +695,56 @@ func TestIndexHandler(t *testing.T) {
 //     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
 //     confirm block not deleted)
 //
-func TestDeleteHandler(t *testing.T) {
-       defer teardown()
-
-       // Set up Keep volumes and populate them.
-       // Include multiple blocks on different volumes, and
-       // some metadata files (which should be omitted from index listings)
-       KeepVM = MakeTestVolumeManager(2)
-       defer KeepVM.Close()
+func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
-       vols := KeepVM.AllWritable()
-       vols[0].Put(TEST_HASH, TEST_BLOCK)
+       vols := s.handler.volmgr.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
 
-       // Explicitly set the blob_signature_ttl to 0 for these
+       // Explicitly set the BlobSigningTTL to 0 for these
        // tests, to ensure the MockVolume deletes the blocks
        // even though they have just been created.
-       blob_signature_ttl = time.Duration(0)
+       s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
 
-       var user_token = "NOT DATA MANAGER TOKEN"
-       data_manager_token = "DATA MANAGER TOKEN"
+       var userToken = "NOT DATA MANAGER TOKEN"
+       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
 
-       never_delete = false
+       s.cluster.Collections.BlobTrash = true
 
-       unauth_req := &RequestTester{
+       unauthReq := &RequestTester{
                method: "DELETE",
-               uri:    "/" + TEST_HASH,
+               uri:    "/" + TestHash,
        }
 
-       user_req := &RequestTester{
-               method:    "DELETE",
-               uri:       "/" + TEST_HASH,
-               api_token: user_token,
+       userReq := &RequestTester{
+               method:   "DELETE",
+               uri:      "/" + TestHash,
+               apiToken: userToken,
        }
 
-       superuser_existing_block_req := &RequestTester{
-               method:    "DELETE",
-               uri:       "/" + TEST_HASH,
-               api_token: data_manager_token,
+       superuserExistingBlockReq := &RequestTester{
+               method:   "DELETE",
+               uri:      "/" + TestHash,
+               apiToken: s.cluster.SystemRootToken,
        }
 
-       superuser_nonexistent_block_req := &RequestTester{
-               method:    "DELETE",
-               uri:       "/" + TEST_HASH_2,
-               api_token: data_manager_token,
+       superuserNonexistentBlockReq := &RequestTester{
+               method:   "DELETE",
+               uri:      "/" + TestHash2,
+               apiToken: s.cluster.SystemRootToken,
        }
 
        // Unauthenticated request returns PermissionError.
        var response *httptest.ResponseRecorder
-       response = IssueRequest(unauth_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, unauthReq)
+       ExpectStatusCode(c,
                "unauthenticated request",
                PermissionError.HTTPCode,
                response)
 
        // Authenticated non-admin request returns PermissionError.
-       response = IssueRequest(user_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, userReq)
+       ExpectStatusCode(c,
                "authenticated non-admin request",
                PermissionError.HTTPCode,
                response)
@@ -494,64 +754,65 @@ func TestDeleteHandler(t *testing.T) {
                Deleted int `json:"copies_deleted"`
                Failed  int `json:"copies_failed"`
        }
-       var response_dc, expected_dc deletecounter
+       var responseDc, expectedDc deletecounter
 
-       response = IssueRequest(superuser_nonexistent_block_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, superuserNonexistentBlockReq)
+       ExpectStatusCode(c,
                "data manager request, nonexistent block",
                http.StatusNotFound,
                response)
 
-       // Authenticated admin request for existing block while never_delete is set.
-       never_delete = true
-       response = IssueRequest(superuser_existing_block_req)
-       ExpectStatusCode(t,
+       // Authenticated admin request for existing block while BlobTrash is false.
+       s.cluster.Collections.BlobTrash = false
+       response = IssueRequest(s.handler, superuserExistingBlockReq)
+       ExpectStatusCode(c,
                "authenticated request, existing block, method disabled",
                MethodDisabledError.HTTPCode,
                response)
-       never_delete = false
+       s.cluster.Collections.BlobTrash = true
 
        // Authenticated admin request for existing block.
-       response = IssueRequest(superuser_existing_block_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, superuserExistingBlockReq)
+       ExpectStatusCode(c,
                "data manager request, existing block",
                http.StatusOK,
                response)
        // Expect response {"copies_deleted":1,"copies_failed":0}
-       expected_dc = deletecounter{1, 0}
-       json.NewDecoder(response.Body).Decode(&response_dc)
-       if response_dc != expected_dc {
-               t.Errorf("superuser_existing_block_req\nexpected: %+v\nreceived: %+v",
-                       expected_dc, response_dc)
+       expectedDc = deletecounter{1, 0}
+       json.NewDecoder(response.Body).Decode(&responseDc)
+       if responseDc != expectedDc {
+               c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
+                       expectedDc, responseDc)
        }
        // Confirm the block has been deleted
-       _, err := vols[0].Get(TEST_HASH)
-       var block_deleted = os.IsNotExist(err)
-       if !block_deleted {
-               t.Error("superuser_existing_block_req: block not deleted")
+       buf := make([]byte, BlockSize)
+       _, err := vols[0].Get(context.Background(), TestHash, buf)
+       var blockDeleted = os.IsNotExist(err)
+       if !blockDeleted {
+               c.Error("superuserExistingBlockReq: block not deleted")
        }
 
-       // A DELETE request on a block newer than blob_signature_ttl
+       // A DELETE request on a block newer than BlobSigningTTL
        // should return success but leave the block on the volume.
-       vols[0].Put(TEST_HASH, TEST_BLOCK)
-       blob_signature_ttl = time.Hour
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+       s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
 
-       response = IssueRequest(superuser_existing_block_req)
-       ExpectStatusCode(t,
+       response = IssueRequest(s.handler, superuserExistingBlockReq)
+       ExpectStatusCode(c,
                "data manager request, existing block",
                http.StatusOK,
                response)
        // Expect response {"copies_deleted":1,"copies_failed":0}
-       expected_dc = deletecounter{1, 0}
-       json.NewDecoder(response.Body).Decode(&response_dc)
-       if response_dc != expected_dc {
-               t.Errorf("superuser_existing_block_req\nexpected: %+v\nreceived: %+v",
-                       expected_dc, response_dc)
+       expectedDc = deletecounter{1, 0}
+       json.NewDecoder(response.Body).Decode(&responseDc)
+       if responseDc != expectedDc {
+               c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
+                       expectedDc, responseDc)
        }
        // Confirm the block has NOT been deleted.
-       _, err = vols[0].Get(TEST_HASH)
+       _, err = vols[0].Get(context.Background(), TestHash, buf)
        if err != nil {
-               t.Errorf("testing delete on new block: %s\n", err)
+               c.Errorf("testing delete on new block: %s\n", err)
        }
 }
 
@@ -582,83 +843,92 @@ func TestDeleteHandler(t *testing.T) {
 // pull list simultaneously.  Make sure that none of them return 400
 // Bad Request and that pullq.GetList() returns a valid list.
 //
-func TestPullHandler(t *testing.T) {
-       defer teardown()
+func (s *HandlerSuite) TestPullHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
-       var user_token = "USER TOKEN"
-       data_manager_token = "DATA MANAGER TOKEN"
+       // Replace the router's pullq -- which the worker goroutines
+       // started by setup() are now receiving from -- with a new
+       // one, so we can see what the handler sends to it.
+       pullq := NewWorkQueue()
+       s.handler.Handler.(*router).pullq = pullq
 
-       pullq = NewWorkQueue()
+       var userToken = "USER TOKEN"
+       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
 
-       good_json := []byte(`[
+       goodJSON := []byte(`[
                {
-                       "locator":"locator_with_two_servers",
+                       "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
                        "servers":[
-                               "server1",
-                               "server2"
+                               "http://server1",
+                               "http://server2"
                        ]
                },
                {
-                       "locator":"locator_with_no_servers",
+                       "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
                        "servers":[]
                },
                {
-                       "locator":"",
-                       "servers":["empty_locator"]
+                       "locator":"cccccccccccccccccccccccccccccccc+12345",
+                       "servers":["http://server1"]
                }
        ]`)
 
-       bad_json := []byte(`{ "key":"I'm a little teapot" }`)
+       badJSON := []byte(`{ "key":"I'm a little teapot" }`)
 
        type pullTest struct {
-               name          string
-               req           RequestTester
-               response_code int
-               response_body string
+               name         string
+               req          RequestTester
+               responseCode int
+               responseBody string
        }
        var testcases = []pullTest{
                {
                        "Valid pull list from an ordinary user",
-                       RequestTester{"/pull", user_token, "PUT", good_json},
+                       RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Invalid pull request from an ordinary user",
-                       RequestTester{"/pull", user_token, "PUT", bad_json},
+                       RequestTester{"/pull", userToken, "PUT", badJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Valid pull request from the data manager",
-                       RequestTester{"/pull", data_manager_token, "PUT", good_json},
+                       RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
                        http.StatusOK,
                        "Received 3 pull requests\n",
                },
                {
                        "Invalid pull request from the data manager",
-                       RequestTester{"/pull", data_manager_token, "PUT", bad_json},
+                       RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
                        http.StatusBadRequest,
                        "",
                },
        }
 
        for _, tst := range testcases {
-               response := IssueRequest(&tst.req)
-               ExpectStatusCode(t, tst.name, tst.response_code, response)
-               ExpectBody(t, tst.name, tst.response_body, response)
+               response := IssueRequest(s.handler, &tst.req)
+               ExpectStatusCode(c, tst.name, tst.responseCode, response)
+               ExpectBody(c, tst.name, tst.responseBody, response)
        }
 
        // The Keep pull manager should have received one good list with 3
        // requests on it.
        for i := 0; i < 3; i++ {
-               item := <-pullq.NextItem
+               var item interface{}
+               select {
+               case item = <-pullq.NextItem:
+               case <-time.After(time.Second):
+                       c.Error("timed out")
+               }
                if _, ok := item.(PullRequest); !ok {
-                       t.Errorf("item %v could not be parsed as a PullRequest", item)
+                       c.Errorf("item %v could not be parsed as a PullRequest", item)
                }
        }
 
-       expectChannelEmpty(t, pullq.NextItem)
+       expectChannelEmpty(c, pullq.NextItem)
 }
 
 // TestTrashHandler
@@ -688,15 +958,18 @@ func TestPullHandler(t *testing.T) {
 // trash list simultaneously.  Make sure that none of them return 400
 // Bad Request and that replica.Dump() returns a valid list.
 //
-func TestTrashHandler(t *testing.T) {
-       defer teardown()
-
-       var user_token = "USER TOKEN"
-       data_manager_token = "DATA MANAGER TOKEN"
-
-       trashq = NewWorkQueue()
-
-       good_json := []byte(`[
+func (s *HandlerSuite) TestTrashHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+       // Replace the router's trashq -- which the worker goroutines
+       // started by setup() are now receiving from -- with a new
+       // one, so we can see what the handler sends to it.
+       trashq := NewWorkQueue()
+       s.handler.Handler.(*router).trashq = trashq
+
+       var userToken = "USER TOKEN"
+       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
+
+       goodJSON := []byte(`[
                {
                        "locator":"block1",
                        "block_mtime":1409082153
@@ -711,46 +984,46 @@ func TestTrashHandler(t *testing.T) {
                }
        ]`)
 
-       bad_json := []byte(`I am not a valid JSON string`)
+       badJSON := []byte(`I am not a valid JSON string`)
 
        type trashTest struct {
-               name          string
-               req           RequestTester
-               response_code int
-               response_body string
+               name         string
+               req          RequestTester
+               responseCode int
+               responseBody string
        }
 
        var testcases = []trashTest{
                {
                        "Valid trash list from an ordinary user",
-                       RequestTester{"/trash", user_token, "PUT", good_json},
+                       RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Invalid trash list from an ordinary user",
-                       RequestTester{"/trash", user_token, "PUT", bad_json},
+                       RequestTester{"/trash", userToken, "PUT", badJSON, ""},
                        http.StatusUnauthorized,
                        "Unauthorized\n",
                },
                {
                        "Valid trash list from the data manager",
-                       RequestTester{"/trash", data_manager_token, "PUT", good_json},
+                       RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
                        http.StatusOK,
                        "Received 3 trash requests\n",
                },
                {
                        "Invalid trash list from the data manager",
-                       RequestTester{"/trash", data_manager_token, "PUT", bad_json},
+                       RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
                        http.StatusBadRequest,
                        "",
                },
        }
 
        for _, tst := range testcases {
-               response := IssueRequest(&tst.req)
-               ExpectStatusCode(t, tst.name, tst.response_code, response)
-               ExpectBody(t, tst.name, tst.response_body, response)
+               response := IssueRequest(s.handler, &tst.req)
+               ExpectStatusCode(c, tst.name, tst.responseCode, response)
+               ExpectBody(c, tst.name, tst.responseBody, response)
        }
 
        // The trash collector should have received one good list with 3
@@ -758,11 +1031,11 @@ func TestTrashHandler(t *testing.T) {
        for i := 0; i < 3; i++ {
                item := <-trashq.NextItem
                if _, ok := item.(TrashRequest); !ok {
-                       t.Errorf("item %v could not be parsed as a TrashRequest", item)
+                       c.Errorf("item %v could not be parsed as a TrashRequest", item)
                }
        }
 
-       expectChannelEmpty(t, trashq.NextItem)
+       expectChannelEmpty(c, trashq.NextItem)
 }
 
 // ====================
@@ -771,109 +1044,179 @@ func TestTrashHandler(t *testing.T) {
 
 // IssueRequest executes an HTTP request described by rt against a
 // REST router.  It returns the HTTP response to the request.
-func IssueRequest(rt *RequestTester) *httptest.ResponseRecorder {
+func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
        response := httptest.NewRecorder()
-       body := bytes.NewReader(rt.request_body)
+       body := bytes.NewReader(rt.requestBody)
        req, _ := http.NewRequest(rt.method, rt.uri, body)
-       if rt.api_token != "" {
-               req.Header.Set("Authorization", "OAuth2 "+rt.api_token)
+       if rt.apiToken != "" {
+               req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
        }
-       loggingRouter := MakeLoggingRESTRouter()
-       loggingRouter.ServeHTTP(response, req)
+       if rt.storageClasses != "" {
+               req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
+       }
+       handler.ServeHTTP(response, req)
+       return response
+}
+
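+// IssueHealthCheckRequest is like IssueRequest, except it sends the
+// token with the "Bearer" scheme used by the health check endpoints.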
+func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
+       response := httptest.NewRecorder()
+       body := bytes.NewReader(rt.requestBody)
+       req, _ := http.NewRequest(rt.method, rt.uri, body)
+       if rt.apiToken != "" {
+               req.Header.Set("Authorization", "Bearer "+rt.apiToken)
+       }
+       handler.ServeHTTP(response, req)
        return response
 }
 
 // ExpectStatusCode checks whether a response has the specified status code,
 // and reports a test failure if not.
 func ExpectStatusCode(
-       t *testing.T,
+       c *check.C,
        testname string,
-       expected_status int,
+       expectedStatus int,
        response *httptest.ResponseRecorder) {
-       if response.Code != expected_status {
-               t.Errorf("%s: expected status %d, got %+v",
-                       testname, expected_status, response)
-       }
+       c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
 }
 
 func ExpectBody(
-       t *testing.T,
+       c *check.C,
        testname string,
-       expected_body string,
+       expectedBody string,
        response *httptest.ResponseRecorder) {
-       if expected_body != "" && response.Body.String() != expected_body {
-               t.Errorf("%s: expected response body '%s', got %+v",
-                       testname, expected_body, response)
+       if expectedBody != "" && response.Body.String() != expectedBody {
+               c.Errorf("%s: expected response body '%s', got %+v",
+                       testname, expectedBody, response)
+       }
+}
+
+// TestPutNeedsOnlyOneBuffer verifies that a single PUT request never
+// needs more than one buffer, so it cannot deadlock when
+// MaxKeepBlobBuffers is 1. See #7121.
+func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+
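+       // Replace the buffer pool with one that holds only a single
+       // buffer, restoring the original pool when the test ends.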
+       defer func(orig *bufferPool) {
+               bufs = orig
+       }(bufs)
+       bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
+
+       ok := make(chan struct{})
+       go func() {
+               for i := 0; i < 2; i++ {
+                       response := IssueRequest(s.handler,
+                               &RequestTester{
+                                       method:      "PUT",
+                                       uri:         "/" + TestHash,
+                                       requestBody: TestBlock,
+                               })
+                       ExpectStatusCode(c,
+                               "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
+               }
+               ok <- struct{}{}
+       }()
+
+       select {
+       case <-ok:
+       case <-time.After(time.Second):
+               c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
        }
 }
 
-// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
-// leak.
+// Invoke the PutBlockHandler more times than there are buffers, to
+// check for a buffer pool resource leak.
-func TestPutHandlerNoBufferleak(t *testing.T) {
-       defer teardown()
-
-       // Prepare two test Keep volumes.
-       KeepVM = MakeTestVolumeManager(2)
-       defer KeepVM.Close()
+func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
        ok := make(chan bool)
        go func() {
-               for i := 0; i < maxBuffers+1; i += 1 {
+               for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
                        // Unauthenticated request, no server key
                        // => OK (unsigned response)
-                       unsigned_locator := "/" + TEST_HASH
-                       response := IssueRequest(
+                       unsignedLocator := "/" + TestHash
+                       response := IssueRequest(s.handler,
                                &RequestTester{
-                                       method:       "PUT",
-                                       uri:          unsigned_locator,
-                                       request_body: TEST_BLOCK,
+                                       method:      "PUT",
+                                       uri:         unsignedLocator,
+                                       requestBody: TestBlock,
                                })
-                       ExpectStatusCode(t,
-                               "TestPutHandlerBufferleak", http.StatusOK, response)
-                       ExpectBody(t,
-                               "TestPutHandlerBufferleak",
-                               TEST_HASH_PUT_RESPONSE, response)
+                       ExpectStatusCode(c,
+                               "TestPutHandlerNoBufferleak", http.StatusOK, response)
+                       ExpectBody(c,
+                               "TestPutHandlerNoBufferleak",
+                               TestHashPutResp, response)
                }
                ok <- true
        }()
        select {
        case <-time.After(20 * time.Second):
                // If the buffer pool leaks, the test goroutine hangs.
-               t.Fatal("test did not finish, assuming pool leaked")
+               c.Fatal("test did not finish, assuming pool leaked")
        case <-ok:
        }
 }
 
+func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
+       s.cluster.Collections.BlobSigning = false
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+
+       defer func(orig *bufferPool) {
+               bufs = orig
+       }(bufs)
+       bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
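+       // Acquire the pool's only buffer now (returning it when the
+       // test ends), so the handler can respond promptly only by
+       // noticing the canceled request before reserving a buffer.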
+       defer bufs.Put(bufs.Get(BlockSize))
+
+       err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
+       c.Assert(err, check.IsNil)
+
+       resp := httptest.NewRecorder()
+       ok := make(chan struct{})
+       go func() {
+               ctx, cancel := context.WithCancel(context.Background())
+               req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
+               cancel()
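+               // The context is canceled before ServeHTTP runs,
+               // simulating a client that has already disconnected.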
+               s.handler.ServeHTTP(resp, req)
+               ok <- struct{}{}
+       }()
+
+       select {
+       case <-time.After(20 * time.Second):
+               c.Fatal("request took >20s, request cancellation must be broken")
+       case <-ok:
+       }
+
+       ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
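+       // The handler should have given up before asking any volume
+       // for the block.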
+       for i, v := range s.handler.volmgr.AllWritable() {
+               if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
+                       c.Errorf("volume %d got %d calls, expected 0", i, calls)
+               }
+       }
+}
+
-// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
-// leak.
+// Invoke the GetBlockHandler more times than there are buffers, to
+// check for a buffer pool resource leak.
-func TestGetHandlerNoBufferleak(t *testing.T) {
-       defer teardown()
-
-       // Prepare two test Keep volumes. Our block is stored on the second volume.
-       KeepVM = MakeTestVolumeManager(2)
-       defer KeepVM.Close()
+func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
-       vols := KeepVM.AllWritable()
-       if err := vols[0].Put(TEST_HASH, TEST_BLOCK); err != nil {
-               t.Error(err)
+       vols := s.handler.volmgr.AllWritable()
+       if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
+               c.Error(err)
        }
 
        ok := make(chan bool)
        go func() {
-               for i := 0; i < maxBuffers+1; i += 1 {
+               for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
                        // Unauthenticated request, unsigned locator
                        // => OK
-                       unsigned_locator := "/" + TEST_HASH
-                       response := IssueRequest(
+                       unsignedLocator := "/" + TestHash
+                       response := IssueRequest(s.handler,
                                &RequestTester{
                                        method: "GET",
-                                       uri:    unsigned_locator,
+                                       uri:    unsignedLocator,
                                })
-                       ExpectStatusCode(t,
+                       ExpectStatusCode(c,
                                "Unauthenticated request, unsigned locator", http.StatusOK, response)
-                       ExpectBody(t,
+                       ExpectBody(c,
                                "Unauthenticated request, unsigned locator",
-                               string(TEST_BLOCK),
+                               string(TestBlock),
                                response)
                }
                ok <- true
@@ -881,7 +1224,188 @@ func TestGetHandlerNoBufferleak(t *testing.T) {
        select {
        case <-time.After(20 * time.Second):
                // If the buffer pool leaks, the test goroutine hangs.
-               t.Fatal("test did not finish, assuming pool leaked")
+               c.Fatal("test did not finish, assuming pool leaked")
        case <-ok:
        }
 }
+
+func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
+               "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
+       }
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+       rt := RequestTester{
+               method:      "PUT",
+               uri:         "/" + TestHash,
+               requestBody: TestBlock,
+       }
+
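+       // Success cases: the confirmed-classes header reports the
+       // classes of the volume(s) the block was written to.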
+       for _, trial := range []struct {
+               ask    string
+               expect string
+       }{
+               {"", ""},
+               {"default", "default=1"},
+               {" , default , default , ", "default=1"},
+               {"special", "extra=1, special=1"},
+               {"special, readonly", "extra=1, special=1"},
+               {"special, nonexistent", "extra=1, special=1"},
+               {"extra, special", "extra=1, special=1"},
+               {"default, special", "default=1, extra=1, special=1"},
+       } {
+               c.Logf("success case %#v", trial)
+               rt.storageClasses = trial.ask
+               resp := IssueRequest(s.handler, &rt)
+               if trial.expect == "" {
+                       // any non-empty value is correct
+                       c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
+               } else {
+                       c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
+               }
+       }
+
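+       // Failure cases: no writable volume provides any of the
+       // requested classes.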
+       for _, trial := range []struct {
+               ask string
+       }{
+               {"doesnotexist"},
+               {"doesnotexist, readonly"},
+               {"readonly"},
+       } {
+               c.Logf("failure case %#v", trial)
+               rt.storageClasses = trial.ask
+               resp := IssueRequest(s.handler, &rt)
+               c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
+       }
+}
+
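+// sortCommaSeparated sorts the ", "-separated fields of s, so
+// comma-separated header values can be compared without regard to
+// order.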
+func sortCommaSeparated(s string) string {
+       slice := strings.Split(s, ", ")
+       sort.Strings(slice)
+       return strings.Join(slice, ", ")
+}
+
+func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+
+       resp := IssueRequest(s.handler, &RequestTester{
+               method:      "PUT",
+               uri:         "/" + TestHash,
+               requestBody: TestBlock,
+       })
+       c.Logf("%#v", resp)
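+       // One replica was written to a single mock volume, which has
+       // the implicit "default" storage class.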
+       c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
+       c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
+}
+
+func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+
+       // Set up Keep volumes
+       vols := s.handler.volmgr.AllWritable()
+       vols[0].Put(context.Background(), TestHash, TestBlock)
+
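+       // Untrash requests are only accepted with the system root
+       // token.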
+       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
+
+       // unauthenticatedReq => UnauthorizedError
+       unauthenticatedReq := &RequestTester{
+               method: "PUT",
+               uri:    "/untrash/" + TestHash,
+       }
+       response := IssueRequest(s.handler, unauthenticatedReq)
+       ExpectStatusCode(c,
+               "Unauthenticated request",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // notDataManagerReq => UnauthorizedError
+       notDataManagerReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/" + TestHash,
+               apiToken: knownToken,
+       }
+
+       response = IssueRequest(s.handler, notDataManagerReq)
+       ExpectStatusCode(c,
+               "Non-datamanager token",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // datamanagerWithBadHashReq => StatusBadRequest
+       datamanagerWithBadHashReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/thisisnotalocator",
+               apiToken: s.cluster.SystemRootToken,
+       }
+       response = IssueRequest(s.handler, datamanagerWithBadHashReq)
+       ExpectStatusCode(c,
+               "Bad locator in untrash request",
+               http.StatusBadRequest,
+               response)
+
+       // datamanagerWrongMethodReq => StatusMethodNotAllowed
+       datamanagerWrongMethodReq := &RequestTester{
+               method:   "GET",
+               uri:      "/untrash/" + TestHash,
+               apiToken: s.cluster.SystemRootToken,
+       }
+       response = IssueRequest(s.handler, datamanagerWrongMethodReq)
+       ExpectStatusCode(c,
+               "Only PUT method is supported for untrash",
+               http.StatusMethodNotAllowed,
+               response)
+
+       // datamanagerReq => StatusOK
+       datamanagerReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/" + TestHash,
+               apiToken: s.cluster.SystemRootToken,
+       }
+       response = IssueRequest(s.handler, datamanagerReq)
+       ExpectStatusCode(c,
+               "",
+               http.StatusOK,
+               response)
+       c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
+}
+
+func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
+       // Change all volumes to read-only
+       for uuid, v := range s.cluster.Volumes {
+               v.ReadOnly = true
+               s.cluster.Volumes[uuid] = v
+       }
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+
+       // datamanagerReq => StatusNotFound
+       datamanagerReq := &RequestTester{
+               method:   "PUT",
+               uri:      "/untrash/" + TestHash,
+               apiToken: s.cluster.SystemRootToken,
+       }
+       response := IssueRequest(s.handler, datamanagerReq)
+       ExpectStatusCode(c,
+               "No writable volumes",
+               http.StatusNotFound,
+               response)
+}
+
+func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
+       s.cluster.ManagementToken = arvadostest.ManagementToken
+       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+       pingReq := &RequestTester{
+               method:   "GET",
+               uri:      "/_health/ping",
+               apiToken: arvadostest.ManagementToken,
+       }
+       response := IssueHealthCheckRequest(s.handler, pingReq)
+       ExpectStatusCode(c,
+               "",
+               http.StatusOK,
+               response)
+       want := `{"health":"OK"}`
+       if !strings.Contains(response.Body.String(), want) {
+               c.Errorf("expected response to include %s: got %s", want, response.Body.String())
+       }
+}